# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2017 Georgi Georgiev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, and detaching persistent storage volumes.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
# ge0rgi: added is_volume_trusted
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder import keymgr as key_manager
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from cinder.scheduler.filters.asset_tag_filter import TrustAssertionFilter
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
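# Quota engines for volumes, consistency groups and generic volume groups,
# used to reserve and commit usage changes (e.g. in delete_volume and
# delete_snapshot below).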
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_ADD_VOL_TO_GROUP_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
VA_LIST = objects.VolumeAttachmentList
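# VA_LIST is a short alias for VolumeAttachmentList; attach_volume below uses
# it to look up existing attachments by instance UUID or host.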
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
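# Illustrative example (keys are made up): 'extra_capabilities' is a JSON
# string, e.g. in cinder.conf:
#
#     extra_capabilities = {"service_level": "gold", "region": "west"}
#
# It is parsed with jsonutils.loads() in __init__ below so the scheduler's
# CapabilitiesFilter can use these key/value pairs when selecting backends.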
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.ibm.xiv_ds8k':
'cinder.volume.drivers.ibm.ibm_storage',
'cinder.volume.drivers.emc.scaleio':
'cinder.volume.drivers.dell_emc.scaleio.driver',
'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver':
'cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver',
'cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver':
'cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver',
'cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver':
'cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver',
'cinder.volume.drivers.datera.DateraDriver':
'cinder.volume.drivers.datera.datera_iscsi.DateraDriver',
'cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver':
'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver',
'cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver':
'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver',
'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver':
'cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver',
}
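# MAPPING translates deprecated driver import paths to their current
# locations; __init__ below logs a deprecation warning and substitutes the
# new path before importing the driver class.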
class VolumeManager(manager.CleanableManager,
manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment', 'group'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if it's not using the multi-backend setup
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
# the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
constants.VOLUME_BINARY)
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.key_manager = key_manager.API(CONF)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
cluster_name=self.cluster,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
msg = _LE('Active-Active configuration is not currently supported '
'by driver %s.') % volume_driver
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
if CONF.trusted_computing:
self.asset_tag_filter = TrustAssertionFilter()
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pools were introduced; ask the
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name; if
# volume_backend_name is None, use the default pool name.
# This is only for counting purposes; it doesn't update the DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id, we can add more
# items to the update if they're relevant but we need to be safe in
# what we allow and add a list of allowed keys. Things that make sense
# are provider_*, replication_status etc
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
# NOTE(JDG): Make sure returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
# NOTE(jdg): snapshots are slightly harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
LOG.info(_LI('Including all resources from host %(host)s in cluster '
'%(cluster)s.'),
{'host': self.host, 'cluster': self.cluster})
num_vols = objects.VolumeList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cache = db.image_volume_cache_include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info(_LI('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
'and %(num_cache)s image volume caches from host '
'%(host)s have been included in cluster %(cluster)s.'),
{'num_vols': num_vols, 'num_cgs': num_cgs,
'host': self.host, 'cluster': self.cluster,
'num_cache': num_cache})
def init_host(self, added_to_cluster=None, **kwargs):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
if not self.driver.supported:
utils.log_unsupported_driver_warning(self.driver)
if not self.configuration.enable_unsupported_driver:
LOG.error(_LE("Unsupported drivers are disabled."
" You can re-enable by adding "
"enable_unsupported_driver=True to the "
"driver section in cinder.conf"),
resource={'type': 'driver',
'id': self.__class__.__name__})
return
# If we have just added this host to a cluster we have to include all
# our resources in that cluster.
if added_to_cluster:
self._include_resources_in_cluster(ctxt)
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = self._get_my_volumes(ctxt)
snapshots = self._get_my_snapshots(ctxt)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
try:
for volume in volumes:
# available volumes should also be counted as allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.conditional_update({'status': 'error'},
{'status': 'in-use'})
# All other cleanups are processed by parent class CleanableManager
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
# Keep the image tmp file clean when init host.
backend_name = vol_utils.extract_host(self.service_topic_queue)
image_utils.cleanup_temporary_file(backend_name)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
# Make sure to call CleanableManager to do the cleanup
super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster,
**kwargs)
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
# Make sure the driver is initialized first
utils.log_unsupported_driver_warning(self.driver)
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
LOG.error(_LE("Cannot complete RPC initialization because "
"driver isn't initialized properly."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
return
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
constants.VOLUME_BINARY)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def _do_cleanup(self, ctxt, vo_resource):
if isinstance(vo_resource, objects.Volume):
if vo_resource.status == 'downloading':
self.driver.clear_download(ctxt, vo_resource)
elif vo_resource.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, vo_resource.id)
elif vo_resource.status == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
vo_resource, cascade=True)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, vo_resource, cascade=True)
# We signal that we take care of cleaning the worker ourselves
# (with set_workers decorator in delete_volume method) so
# do_cleanup method doesn't need to remove it.
return True
# For volumes in 'creating' or 'downloading' status, and snapshots in
# 'downloading' status, we have to set the status to 'error'.
if vo_resource.status in ('creating', 'downloading'):
vo_resource.status = 'error'
vo_resource.save()
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def _set_resource_host(self, resource):
"""Set the host field on the DB to our own when we are clustered."""
if (resource.is_clustered and
not vol_utils.hosts_are_equivalent(resource.host, self.host)):
pool = vol_utils.extract_host(resource.host, 'pool')
resource.host = vol_utils.append_host(self.host, pool)
resource.save()
@objects.Volume.set_workers
def create_volume(self, context, volume, request_spec=None,
filter_properties=None, allow_reschedule=True):
"""Creates the volume."""
# Log about unsupported drivers
utils.log_unsupported_driver_warning(self.driver)
# Make sure the host in the DB matches our own when clustered
self._set_resource_host(volume)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = objects.RequestSpec()
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
# This code executes the create volume flow. If something goes wrong,
# the flow reverts all the work that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
with coordination.Lock(locked_action):
_run_flow()
finally:
try:
flow_engine.storage.fetch('refreshed')
except tfe.NotFound:
# If there's no vol_ref, then the flow was reverted. Let's check
# whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(volume)
LOG.info(_LI("Created volume successfully."), resource=volume)
return volume.id
def _check_is_our_resource(self, resource):
if resource.host:
res_backend = vol_utils.extract_host(resource.service_topic_queue)
backend = vol_utils.extract_host(self.service_topic_queue)
if res_backend != backend:
msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
'local to %(backend)s.') %
{'resource': resource.obj_name, 'id': resource.id,
'backend': backend})
raise exception.Invalid(msg)
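# The lock name '{volume.id}-{f_name}' below expands to
# '<volume id>-delete_volume'; create_volume above takes the same named lock
# when cloning from a source volume, so a volume cannot be deleted while it
# is being used as a clone source.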
@coordination.synchronized('{volume.id}-{f_name}')
@objects.Volume.set_workers
def delete_volume(self, context, volume, unmanage_only=False,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume (normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume.id)
self._check_is_our_resource(volume)
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
notification = "delete.start"
if unmanage_only:
notification = "unmanage.start"
self._notify_about_volume_usage(context, volume, notification)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
new_status = 'error_deleting'
if unmanage_only is True:
new_status = 'error_unmanaging'
self._clear_db(context, is_migrating_dest, volume,
new_status)
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reservations = None
if volume.status != 'error_managing_deleting':
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
notification = "delete.end"
if unmanage_only:
notification = "unmanage.end"
self._notify_about_volume_usage(context, volume, notification)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume; put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
msg = _LI("Deleted volume successfully.")
if unmanage_only:
msg = _LI("Unmanaged volume successfully.")
LOG.info(msg, resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
@objects.Snapshot.set_workers
def create_snapshot(self, context, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, snapshot.volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, snapshot.volume_id)
except exception.GlanceMetadataNotFound:
# If the volume is not created from an image, no glance metadata
# would be available for that volume in
# the volume glance metadata table.
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': snapshot.volume_id},
resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@coordination.synchronized('{snapshot.id}-{f_name}')
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
msg = _LI("Delete snapshot completed successfully.")
if unmanage_only:
msg = _LI("Unmanage snapshot completed successfully.")
LOG.info(msg, resource=snapshot)
@coordination.synchronized('{volume_id}')
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode, volume=None):
"""Updates db to show volume is attached."""
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look
# up the volume by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
# Get admin_metadata. This needs admin context.
with volume.obj_as_admin():
volume_metadata = volume.admin_metadata
# check the volume status before attaching
if volume.status == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume.status == 'in-use' and not volume.multiattach
and not volume.migration_status):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = (
VA_LIST.get_all_by_instance_uuid(
context, instance_uuid))
else:
attachments = (
VA_LIST.get_all_by_host(
context, host_name_sanitized))
if attachments:
# check if volume<->instance mapping is already tracked in DB
for attachment in attachments:
if attachment['volume_id'] == volume_id:
volume.status = 'in-use'
volume.save()
return attachment
self._notify_about_volume_usage(context, volume,
"attach.start")
attachment = volume.begin_attach(mode)
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
raise exception.InvalidUUID(uuid=instance_uuid)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
self.message_api.create(
context, defined_messages.EventIds.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume.id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume.id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info(_LI('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.'),
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
volume = attachment.finish_attach(
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return attachment
@coordination.synchronized('{volume_id}-{f_name}')
def detach_volume(self, context, volume_id, attachment_id=None,
volume=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
if attachment_id:
try:
attachment = objects.VolumeAttachment.get_by_id(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
volume.finish_detach(attachment_id)
return
else:
# We can try to degrade gracefully here by detaching the volume
# without an attachment_id if the volume only has one attachment.
# This is for backwards compatibility.
attachments = volume.volume_attachment
if len(attachments) > 1:
# There is more than one attachment for this volume, so
# we have to have an attachment_id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# There aren't any attachments for this volume,
# so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
volume.status = 'available'
volume.attach_status = fields.VolumeAttachStatus.DETACHED
volume.save()
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info(_LI('Detaching volume %(volume_id)s from instance '
'%(instance)s.'),
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'), {
'attach_status':
fields.VolumeAttachStatus.ERROR_DETACHING})
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F);
# this was necessary to convert in-use volumes from
# int IDs to UUIDs. We don't need this any longer.
# We're going to remove the export here
# (delete the iscsi target).
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
volume.finish_detach(attachment.id)
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(ctx, volume_ref):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on %(service)s.'),
{'image': image_id,
'service': volume_ref.service_topic_queue})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s; will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = (
fields.VolumeAttachStatus.DETACHED)
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
LOG.exception(_LE('Create clone_image_volume: %(volume_id)s'
'for image %(image_id)s, '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume, allow_reschedule=False)
image_volume = objects.Volume.get_by_id(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
# The image_owner metadata should be set before uri is added to
# the image so glance cinder store can check its owner.
image_volume_meta = {'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta['glance_image_id'] = image_meta['id']
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = objects.Volume.get_by_id(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context,
defined_messages.EventIds.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def _parse_connection_options(self, context, volume, conn_info):
# Add qos_specs to connection info
typeid = volume.volume_type_id
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# Only pass qos_specs that are designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = volume.admin_metadata
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.encryption_key_id)
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
return conn_info
def initialize_connection(self, context, volume, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
The driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
# TODO(jdg): Add deprecation warning
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException as ex:
msg = _("Create export of volume failed (%s)") % ex.msg
LOG.exception(msg, resource=volume)
raise exception.VolumeBackendAPIException(data=msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(context, volume, conn_info)
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
# for those that do, allow them to return updated model info.
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False,
attach_encryptor=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume, properties)
attach_info = self._connect_device(conn)
try:
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_attach_volume_encryptor(ctxt,
attach_info,
encryption)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume encryptor"
" %(vol)s."), {'vol': volume['id']})
self._detach_volume(ctxt, attach_info, volume, properties)
return attach_info
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False,
attach_encryptor=False):
connector = attach_info['connector']
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_detach_volume_encryptor(attach_info, encryption)
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to terminate volume connection: '
'%(err)s.') % {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
attach_encryptor = False
# If the encryption method or key is changed, we have to
# copy data through dm-crypt.
if volume_types.volume_types_encryption_changed(
ctxt,
src_vol.volume_type_id,
dest_vol.volume_type_id):
attach_encryptor = True
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(
ctxt, dest_vol, properties,
remote=dest_remote,
attach_encryptor=attach_encryptor)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(
ctxt, src_vol, properties,
remote=src_remote,
attach_encryptor=attach_encryptor)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote,
attach_encryptor=attach_encryptor)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt,
dest_vol.service_topic_queue,
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote,
attach_encryptor=attach_encryptor)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote,
attach_encryptor=attach_encryptor)
def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id):
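# Host-assisted (generic) migration: build a new volume record for the
# destination backend, ask it to create the volume, wait for it to become
# available, then copy the data over (directly, or via Nova when the volume
# is attached) and finish with migrate_volume_completion().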
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
tmp_skip = {'snapshot_id', 'source_volid'}
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | tmp_skip | {'host',
'cluster_name'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
if volume_types.volume_types_encryption_changed(
ctxt, volume.volume_type_id, new_type_id):
encryption_key_id = vol_utils.create_encryption_key(
ctxt, self.key_manager, new_type_id)
new_vol_values['encryption_key_id'] = encryption_key_id
new_volume = objects.Volume(
context=ctxt,
host=backend['host'],
cluster_name=backend.get('cluster_name'),
status='creating',
attach_status=fields.VolumeAttachStatus.DETACHED,
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, None, None,
allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
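        # Poll the new volume's status with a quadratic backoff (1s, 4s,
        # 9s, ...) until it becomes 'available', goes to 'error', or the
        # deadline expires; on failure the temporary DB record is removed.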
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
        # Copy the properties that were deliberately skipped above
        # (snapshot_id and source_volid) back onto the new volume now that
        # it has been created as a raw volume.
tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
if tmp_skipped_values:
new_volume.update(tmp_skipped_values)
new_volume.save()
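        # For detached volumes the data is copied here and the migration is
        # completed synchronously; for attached volumes Nova drives the
        # block copy and calls migrate_volume_completion when it finishes.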
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume, new_volume,
error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
# If we're in the migrating phase, we need to cleanup
# destination volume because source volume is remaining
if volume.migration_status == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
# If we're in the completing phase don't delete the
# destination because we may have already deleted the
# source! But the migration_status in database should
# be cleared to handle volume after migration failure
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
        # After the detach and refresh below, volume.volume_attachment will
        # be empty, so save the current attachments now for the re-attach
        # after the migration completes.
volume_attachments = []
if orig_volume_status == 'in-use':
for attachment in volume.volume_attachment:
# Save the attachments the volume currently have
volume_attachments.append(attachment)
try:
self.detach_volume(ctxt, volume.id, attachment.id)
except Exception as ex:
LOG.error(_LE("Detach migration source volume "
"%(volume.id)s from instance "
"%(instance_id)s failed: %(err)s"),
{'err': ex,
'volume.id': volume.id,
'instance_id': attachment.id},
resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
# Note this needs to go through rpc to the host of the new volume
# the current host and driver object is for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
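        # After the swap the original volume id points at the migrated
        # data, while 'updated_new' refers to the old backend-side volume
        # that is deleted asynchronously below.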
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
        # Restore the attachments
if orig_volume_status == 'in-use':
for attachment in volume_attachments:
LOG.debug('Re-attaching: %s', attachment)
rpcapi.attach_volume(ctxt, volume,
attachment.instance_uuid,
attachment.attached_host,
attachment.mountpoint,
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
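        # Let the driver try an optimized migration first (only when no
        # retype is involved); fall back to the generic host-assisted copy
        # below if the driver does not move the volume.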
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'cluster_name': host.get('cluster_name'),
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host, new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume, new_size, reservations):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
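        # Account for the added capacity in the per-pool allocation stats.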
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def _is_our_backend(self, host, cluster_name):
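        """Return True when host/cluster_name matches our own backend."""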
return ((not cluster_name and
vol_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
vol_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
def retype(self, context, volume, new_type_id, host,
migration_policy='never', reservations=None,
old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
                # NOTE(wanghao): We don't need to reserve volumes and gigabytes
                # quota for the retype operation since they don't change; it is
                # enough to reserve volume_type and per-type gigabytes.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything.
        # Use the admin context to be able to access volume extra_specs
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context.elevated(), volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
        # NOTE(jdg): Check whether the destination host or cluster (depending
        # on whether the volume is in a clustered backend or not) is the same
        # as the current one. If it is not, don't call driver.retype: drivers
        # that implement retype may report success, but that is invalid when
        # a migration is actually required.
# We assume that those that support pools do this internally
# so we strip off the pools designation
if (not retyped and
not diff.get('encryption') and
self._is_our_backend(host['host'], host.get('cluster_name'))):
try:
new_type = volume_types.get_volume_type(context.elevated(),
new_type_id)
with volume.obj_as_admin():
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
                if isinstance(ret, tuple):
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'cluster_name': host.get('cluster_name'),
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self._set_replication_status(diff, model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
@staticmethod
def _set_replication_status(diff, model_update):
"""Update replication_status in model_update if it has changed."""
if not diff or model_update.get('replication_status'):
return
diff_specs = diff.get('extra_specs', {})
replication_diff = diff_specs.get('replication_enabled')
if replication_diff:
is_replicated = vol_utils.is_replicated_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
model_update['replication_status'] = replication_status
def manage_existing(self, ctxt, volume, ref=None):
vol_ref = self._run_manage_existing_flow_engine(
ctxt, volume, ref)
self._update_stats_for_managed(vol_ref)
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
# Update volume stats
pool = vol_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume,
ref,
)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
return vol_ref
def _get_my_resources(self, ctxt, ovo_class_list):
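        # List the resources owned by this service: filter by cluster name
        # when the backend is clustered, otherwise by host.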
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters)
def _get_my_volumes(self, ctxt):
return self._get_my_resources(ctxt, objects.VolumeList)
def _get_my_snapshots(self, ctxt):
return self._get_my_resources(ctxt, objects.SnapshotList)
def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to uninitialized driver."))
cinder_volumes = self._get_my_volumes(ctxt)
try:
driver_entries = self.driver.get_manageable_volumes(
cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableVolumeList.
from_primitives(ctxt, driver_entries))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to driver error."))
return driver_entries
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
return self._create_group(context, group, False)
def create_group(self, context, group):
"""Creates the group."""
return self._create_group(context, group)
def _create_group(self, context, group, is_generic_group=True):
context = context.elevated()
# Make sure the host in the DB matches our own when clustered
self._set_resource_host(group)
status = fields.GroupStatus.AVAILABLE
model_update = None
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.start")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Group %s: creating"), group.name)
if is_generic_group:
try:
model_update = self.driver.create_group(context,
group)
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update = self._create_group_generic(context,
group)
else:
cg, __ = self._convert_group_to_cg(group, [])
model_update = self.driver.create_consistencygroup(
context, cg)
else:
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.GroupStatus.ERROR):
msg = (_('Create group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
LOG.error(_LE("Group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Group %s: created successfully"),
group.name)
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.end")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create group completed successfully."),
resource={'type': 'group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_group_from_src(self, context, group,
group_snapshot=None, source_group=None):
"""Creates the group from source.
The source can be a group snapshot or a source group.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = objects.VolumeList.get_all_by_generic_group(context,
group.id)
if group_snapshot:
try:
# Check if group_snapshot still exists
group_snapshot = objects.GroupSnapshot.get_by_id(
context, group_snapshot.id)
except exception.GroupSnapshotNotFound:
LOG.error(_LE("Create group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': group_snapshot.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("snapshot-%s") % group_snapshot.id
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_GROUP_SRC_SNAP_STATUS):
msg = (_("Cannot create group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
raise exception.InvalidGroup(reason=msg)
if source_group:
try:
source_group = objects.Group.get_by_id(
context, source_group.id)
except exception.GroupNotFound:
LOG.error(_LE("Create group "
"from source group-%(group)s failed: "
"GroupNotFound."),
{'group': source_group.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("group-%s") % source_group.id
source_vols = objects.VolumeList.get_all_by_generic_group(
context, source_group.id)
for source_vol in source_vols:
if (source_vol.status not in
VALID_CREATE_GROUP_SRC_GROUP_STATUS):
msg = (_("Cannot create group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol.id,
'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
raise exception.InvalidGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if group_snapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_group and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_group_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
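            # Prefer driver.create_group_from_src(); when it is not
            # implemented, fall back to the generic per-volume
            # implementation or, for the default cgsnapshot group type,
            # convert the group, snapshots and source group to their CG
            # equivalents and use the consistency group call.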
try:
model_update, volumes_model_update = (
self.driver.create_group_from_src(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group, sorted_source_vols))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, volumes_model_update = (
self._create_group_from_src_generic(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group,
sorted_source_vols))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
cgsnapshot, sorted_snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, sorted_snapshots, context))
source_cg, sorted_source_vols = (
self._convert_group_to_cg(source_group,
sorted_source_vols))
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, cg, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots)
self._remove_consistencygroup_id_from_volumes(volumes)
self._remove_consistencygroup_id_from_volumes(
sorted_source_vols)
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'group',
'id': group.id})
# Update volume status to 'error' as well.
self._remove_consistencygroup_id_from_volumes(volumes)
for vol in volumes:
vol.status = 'error'
vol.save()
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_group_usage(
context, group, "create.end")
LOG.info(_LI("Create group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'group',
'id': group.id})
return group
def _create_group_from_src_generic(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
        for vol in volumes:
            if snapshots:
                for snapshot in snapshots:
                    if vol.snapshot_id == snapshot.id:
                        self.driver.create_volume_from_snapshot(
                            vol, snapshot)
                        break
            if source_vols:
                for source_vol in source_vols:
                    if vol.source_volid == source_vol.id:
                        self.driver.create_cloned_volume(vol, source_vol)
                        break
return None, None
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
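        # Propagate the bootable and multiattach flags and Glance image
        # metadata from the source snapshot or source volume to the newly
        # created volume before applying the status update.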
try:
snapshot_id = vol.get('snapshot_id')
source_volid = vol.get('source_volid')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
if source_volid:
source_vol = objects.Volume.get_by_id(context, source_volid)
if source_vol.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context, source_volid, vol['id'])
if source_vol.multiattach:
update['multiattach'] = True
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
        project_id = group.project_id
volumes = objects.VolumeList.get_all_by_group(context, group.id)
for volume in volumes:
if (volume.attach_status ==
fields.VolumeAttachStatus.ATTACHED):
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume.id)
self._check_is_our_resource(volume)
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
vol.status = 'error'
vol.save()
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
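        # For every volume in the group, give back its quota, remove any
        # Glance metadata, destroy the DB record and adjust the allocated
        # capacity stats.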
for volume in volumes:
# Get reservations for volume
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
self.db.volume_destroy(context, volume.id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume.size
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def delete_group(self, context, group):
"""Deletes group and the volumes in the group."""
context = context.elevated()
        project_id = group.project_id
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
for vol_obj in volumes:
if vol_obj.attach_status == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=vol_obj.id)
self._check_is_our_resource(vol_obj)
self._notify_about_group_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.delete_group(context, group, volumes))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, volumes_model_update = (
self._delete_group_generic(context, group, volumes))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, cg,
volumes))
self._remove_consistencygroup_id_from_volumes(volumes)
if volumes_model_update:
for update in volumes_model_update:
# If we failed to delete a volume, make sure the
# status for the group is set to error as well
if (update['status'] in ['error_deleting', 'error']
and model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = update['status']
self.db.volumes_update(context, volumes_model_update)
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
self._remove_consistencygroup_id_from_volumes(volumes)
for vol_obj in volumes:
vol_obj.status = 'error'
vol_obj.save()
# Get reservations for group
try:
reserve_opts = {'groups': -1}
grpreservations = GROUP_QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
grpreservations = None
LOG.exception(_LE("Delete group "
"failed to update usages."),
resource={'type': 'group',
'id': group.id})
for vol in volumes:
# Get reservations for volume
try:
reserve_opts = {'volumes': -1,
'gigabytes': -vol.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
vol.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete group "
"failed to update usages."),
resource={'type': 'group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, vol.id)
vol.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= vol.size
if grpreservations:
GROUP_QUOTAS.commit(context, grpreservations,
project_id=project_id)
group.destroy()
self._notify_about_group_usage(
context, group, "delete.end")
self.publish_service_capabilities(context)
LOG.info(_LI("Delete group "
"completed successfully."),
resource={'type': 'group',
'id': group.id})
def _convert_group_to_cg(self, group, volumes):
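        # Build a ConsistencyGroup facade for a generic Group and point the
        # volumes' consistencygroup fields at it, so legacy CG driver calls
        # can be reused for generic groups.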
if not group:
return None, None
cg = consistencygroup.ConsistencyGroup()
cg.from_group(group)
for vol in volumes:
vol.consistencygroup_id = vol.group_id
vol.consistencygroup = cg
return cg, volumes
def _remove_consistencygroup_id_from_volumes(self, volumes):
if not volumes:
return
for vol in volumes:
vol.consistencygroup_id = None
vol.consistencygroup = None
def _convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots,
ctxt):
if not group_snapshot:
return None, None
cgsnap = cgsnapshot.CGSnapshot()
cgsnap.from_group_snapshot(group_snapshot)
# Populate consistencygroup object
grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id)
cg, __ = self._convert_group_to_cg(grp, [])
cgsnap.consistencygroup = cg
for snap in snapshots:
snap.cgsnapshot_id = snap.group_snapshot_id
snap.cgsnapshot = cgsnap
return cgsnap, snapshots
def _remove_cgsnapshot_id_from_snapshots(self, snapshots):
if not snapshots:
return
for snap in snapshots:
snap.cgsnapshot_id = None
snap.cgsnapshot = None
def _create_group_generic(self, context, group):
"""Creates a group."""
# A group entry is already created in db. Just returns a status here.
model_update = {'status': fields.GroupStatus.AVAILABLE,
'created_at': timeutils.utcnow()}
return model_update
def _delete_group_generic(self, context, group, volumes):
"""Deletes a group and volumes in the group."""
model_update = {'status': group.status}
volume_model_updates = []
for volume_ref in volumes:
volume_model_update = {'id': volume_ref.id}
try:
self.driver.remove_export(context, volume_ref)
self.driver.delete_volume(volume_ref)
volume_model_update['status'] = 'deleted'
except exception.VolumeIsBusy:
volume_model_update['status'] = 'available'
except Exception:
volume_model_update['status'] = 'error'
model_update['status'] = fields.GroupStatus.ERROR
volume_model_updates.append(volume_model_update)
return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
# NOTE(xyang): The volume manager adds/removes the volume to/from the
# group in the database. This default implementation does not do
# anything in the backend storage.
return None, None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ovo = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ovo.status not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ovo.id,
'group_id': group.id,
'status': add_vol_ovo.status,
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
self._check_is_our_resource(add_vol_ovo)
add_volumes_ref.append(add_vol_ovo)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
        group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates group.
Update group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': add_vol},
resource={'type': 'group',
'id': group.id})
raise
if add_vol_ref.status not in VALID_ADD_VOL_TO_GROUP_STATUS:
msg = (_("Cannot add volume %(volume_id)s to "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref.id,
'group_id': group.id,
'status': add_vol_ref.status,
'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
self._check_is_our_resource(add_vol_ref)
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = objects.Volume.get_by_id(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': remove_vol},
resource={'type': 'group',
'id': group.id})
raise
if (remove_vol_ref.status not in
VALID_REMOVE_VOL_FROM_GROUP_STATUS):
msg = (_("Cannot remove volume %(volume_id)s from "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref.id,
'group_id': group.id,
'status': remove_vol_ref.status,
'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_group_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
try:
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_group(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, add_volumes_update, remove_volumes_update = (
self._update_group_generic(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
else:
cg, remove_volumes_ref = self._convert_group_to_cg(
group, remove_volumes_ref)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
if add_volumes_update:
self.db.volumes_update(context, add_volumes_update)
if remove_volumes_update:
self.db.volumes_update(context, remove_volumes_update)
if model_update:
if model_update['status'] in (
[fields.GroupStatus.ERROR]):
msg = (_('Error occurred when updating group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
group.status = 'available'
group.save()
for add_vol in add_volumes_ref:
add_vol.group_id = group.id
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.group_id = None
rem_vol.save()
self._notify_about_group_usage(
context, group, "update.end")
LOG.info(_LI("Update group completed successfully."),
resource={'type': 'group',
'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
                    # If the volume was not created from an image, no Glance
                    # metadata will be available for it in the volume glance
                    # metadata table.
pass
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def create_group_snapshot(self, context, group_snapshot):
"""Creates the group_snapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("GroupSnapshot %s: creating."), group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.create_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group_snapshot.group_type_id != cgsnap_type['id']:
model_update, snapshots_model_update = (
self._create_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap_id = snap_model.pop('id')
snap_obj = objects.Snapshot.get_by_id(context, snap_id)
snap_obj.update(snap_model)
snap_obj.save()
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = 'error'
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot.volume_id
snapshot_id = snapshot.id
vol_obj = objects.Volume.get_by_id(context, volume_id)
if vol_obj.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
                    # If the volume was not created from an image, no Glance
                    # metadata will be available for it in the volume glance
                    # metadata table.
pass
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
group_snapshot.status = 'available'
group_snapshot.save()
LOG.info(_LI("group_snapshot %s: created successfully"),
group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.end")
return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Creates a group_snapshot."""
model_update = {'status': 'available'}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.create_snapshot(snapshot)
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def _delete_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Deletes a group_snapshot."""
model_update = {'status': group_snapshot.status}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.delete_snapshot(snapshot)
snapshot_model_update['status'] = 'deleted'
except exception.SnapshotIsBusy:
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def delete_group_snapshot(self, context, group_snapshot):
"""Deletes group_snapshot."""
caller_context = context
context = context.elevated()
project_id = group_snapshot.project_id
LOG.info(_LI("group_snapshot %s: deleting"), group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.delete_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group_snapshot.group_type_id != cgsnap_type['id']:
model_update, snapshots_model_update = (
self._delete_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap_model.pop('id')
snap.update(snap_model)
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = 'error'
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
self._remove_cgsnapshot_id_from_snapshots(snapshots)
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = objects.Volume.get_by_id(context,
snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot.id)
snapshot.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
group_snapshot.destroy()
LOG.info(_LI("group_snapshot %s: deleted successfully"),
group_snapshot.id)
self._notify_about_group_snapshot_usage(context, group_snapshot,
"delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
# Replication V2.1 and a/a method
def failover(self, context, secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
Instructs a replication capable/configured backend to failover
        to one of its secondary replication targets. host=None is
        an acceptable input, and leaves it to the driver to failover
        to the only configured target, or to choose a target on its
        own. All of the host's volumes will be passed on to the driver
in order for it to determine the replicated volumes on the host,
if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
updates = {}
repl_status = fields.ReplicationStatus
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(context, svc_host,
constants.VOLUME_BINARY)
volumes = self._get_my_volumes(context)
exception_encountered = True
try:
# For non clustered we can call v2.1 failover_host, but for
# clustered we call a/a failover method. We know a/a method
# exists because BaseVD class wouldn't have started if it didn't.
failover = getattr(self.driver,
'failover' if service.is_clustered
else 'failover_host')
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
active_backend_id, volume_update_list = failover(
context,
volumes,
secondary_id=secondary_backend_id)
exception_encountered = False
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
updates['replication_status'] = repl_status.FAILOVER_ERROR
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status: Status should be failed over if
# we were failing back or if we were failing over from one
# secondary to another secondary. In both cases active_backend_id
# will be set.
if service.active_backend_id:
updates['replication_status'] = repl_status.FAILED_OVER
else:
updates['replication_status'] = repl_status.ENABLED
except exception.VolumeDriverException:
# NOTE(jdg): Drivers need to be aware if they fail during
# a failover sequence, we're expecting them to cleanup
# and make sure the driver state is such that the original
# backend is still set as primary as per driver memory
LOG.error(_LE("Driver reported error during "
"replication failover."))
updates.update(disabled=True,
replication_status=repl_status.FAILOVER_ERROR)
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
self.finish_failover(context, service, updates)
return
if secondary_backend_id == "default":
updates['replication_status'] = repl_status.ENABLED
updates['active_backend_id'] = ''
updates['disabled'] = service.frozen
updates['disabled_reason'] = 'frozen' if service.frozen else ''
else:
updates['replication_status'] = repl_status.FAILED_OVER
updates['active_backend_id'] = active_backend_id
updates['disabled'] = True
updates['disabled_reason'] = 'failed-over'
self.finish_failover(context, service, updates)
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
reason=_("Update list, doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
# TODO(geguileo): In P - remove this
failover_host = failover
def finish_failover(self, context, service, updates):
"""Completion of the failover locally or via RPC."""
# If the service is clustered, broadcast the service changes to all
# volume services, including this one.
if service.is_clustered:
# We have to update the cluster with the same data, and we do it
# before broadcasting the failover_completed RPC call to prevent
            # races with services that may be starting.
for key, value in updates.items():
setattr(service.cluster, key, value)
service.cluster.save()
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.failover_completed(context, service, updates)
else:
service.update(updates)
service.save()
def failover_completed(self, context, updates):
"""Finalize failover of this backend.
When a service is clustered and replicated the failover has 2 stages,
one that does the failover of the volumes and another that finalizes
the failover of the services themselves.
This method takes care of the last part and is called from the service
        doing the failover of the volumes after it has finished processing
        them.
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(context, svc_host,
constants.VOLUME_BINARY)
service.update(updates)
try:
self.driver.failover_completed(context, service.active_backend_id)
except Exception:
msg = _('Driver reported error during replication failover '
'completion.')
LOG.exception(msg)
service.disabled = True
service.disabled_reason = msg
service.replication_status = (
fields.ReplicationStatus.ERROR)
service.save()
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry<|fim▁hole|> # need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_manageable_snapshots(self, ctxt, marker, limit, offset,
sort_keys, sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to uninitialized driver."))
cinder_snapshots = self._get_my_snapshots(ctxt)
try:
driver_entries = self.driver.get_manageable_snapshots(
cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableSnapshotList.
from_primitives(ctxt, driver_entries))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to driver error."))
return driver_entries
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup, want_objects=False):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
# TODO(sborkows): from_primitive method will be removed in O, so there
# is a need to clean here then.
return (objects.BackupDeviceInfo.from_primitive(backup_device_dict,
ctxt)
if want_objects else backup_device_dict)
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
def _connection_create(self, ctxt, volume, attachment, connector):
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(ctxt.elevated(),
volume, connector)
except exception.CinderException as ex:
err_msg = (_("Create export for volume failed (%s).") % ex.msg)
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(ctxt.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(ctxt, volume, conn_info)
# NOTE(jdg): Get rid of the nested dict (data key)
conn_data = conn_info.pop('data', {})
connection_info = conn_data.copy()
connection_info.update(conn_info)
values = {'volume_id': volume.id,
'attach_status': 'attaching', }
self.db.volume_attachment_update(ctxt, attachment.id, values)
self.db.attachment_specs_update_or_create(
ctxt,
attachment.id,
connector)
connection_info['attachment_id'] = attachment.id
return connection_info
def attachment_update(self,
context,
vref,
connector,
attachment_id):
"""Update/Finalize an attachment.
This call updates a valid attachment record to associate with a volume
and provide the caller with the proper connection info. Note that
        this call requires an `attachment_id`. It's expected that prior to
        this call the volume and an attachment UUID have been reserved.
param: vref: Volume object to create attachment for
param: connector: Connector object to use for attachment creation
        param: attachment_id: ID of the attachment record to update
"""
mode = connector.get('mode', 'rw')
self._notify_about_volume_usage(context, vref, 'attach.start')
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
connection_info = self._connection_create(context,
vref,
attachment_ref,
connector)
# FIXME(jdg): get rid of this admin_meta option here, the only thing
# it does is enforce that a volume is R/O, that should be done via a
# type and not *more* metadata
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(),
attachment_ref.volume_id,
{'attached_mode': mode}, False)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, vref.id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=vref.id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=vref.id)
try:
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
vref,
attachment_ref.instance_uuid,
connector.get('hostname', ''),
connector.get('mountpoint', 'na'))
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_ref.id,
{'attach_status': 'error_attaching'})
self.db.volume_attached(context.elevated(),
attachment_ref.id,
attachment_ref.instance_uuid,
connector.get('hostname', ''),
connector.get('mountpoint', 'na'),
mode)
vref.refresh()
self._notify_about_volume_usage(context, vref, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=vref)
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
return connection_info
def _connection_terminate(self, context, volume,
attachment, force=False):
"""Remove a volume connection, but leave attachment."""
utils.require_driver_initialized(self.driver)
# TODO(jdg): Add an object method to cover this
connector = self.db.attachment_specs_get(
context,
attachment.id)
try:
shared_connections = self.driver.terminate_connection(volume,
connector,
force=force)
if not isinstance(shared_connections, bool):
shared_connections = False
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume)
        # NOTE(jdg): Return True/False if there are other outstanding
        # attachments that share this connection. If True, the caller should
        # preserve the actual host connection (the work should be done in the
        # brick connector, as it has the knowledge of what's going on here).
return shared_connections
def attachment_delete(self, context, attachment_id, vref):
"""Delete/Detach the specified attachment.
Notifies the backend device that we're detaching the specified
attachment instance.
param: vref: Volume object associated with the attachment
        param: attachment_id: ID of the attachment record to remove
        NOTE: if the attachment reference is None, we remove all existing
attachments for the specified volume object.
"""
has_shared_connection = False
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
if not attachment_ref:
for attachment in VA_LIST.get_all_by_volume_id(context, vref.id):
if self._do_attachment_delete(context, vref, attachment):
has_shared_connection = True
else:
has_shared_connection = (
self._do_attachment_delete(context, vref, attachment_ref))
return has_shared_connection
def _do_attachment_delete(self, context, vref, attachment):
utils.require_driver_initialized(self.driver)
self._notify_about_volume_usage(context, vref, "detach.start")
has_shared_connection = self._connection_terminate(context,
vref,
attachment)
self.driver.detach_volume(context, vref, attachment)
try:
LOG.debug('Deleting attachment %(attachment_id)s.',
{'attachment_id': attachment.id},
resource=vref)
self.driver.detach_volume(context, vref, attachment)
self.driver.remove_export(context.elevated(), vref)
except Exception:
# FIXME(jdg): Obviously our volume object is going to need some
# changes to deal with multi-attach and figuring out how to
# represent a single failed attach out of multiple attachments
# TODO(jdg): object method here
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
else:
self.db.volume_detached(context.elevated(), vref.id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(),
vref.id,
'attached_mode')
self._notify_about_volume_usage(context, vref, "detach.end")
return has_shared_connection
def is_volume_trusted(self, ctxt, volume_id):
volume = self.db.api.volume_get(ctxt, volume_id)
verify_trust = False
asset_tags = 'None'
host = ''
for metadata in volume.volume_metadata:
if metadata.key == 'trust':
host = volume.host.split("@")[0]
verify_trust = True
elif metadata.key == 'asset_tags':
asset_tags = metadata.value
if verify_trust:
return self.asset_tag_filter.is_trusted(host, asset_tags)
return None<|fim▁end|> | try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really |
<|file_name|>bcsynchronous.C<|end_file_name|><|fim▁begin|>/*
* CINELERRA
* Copyright (C) 2008 Adam Williams <broadcast at earthling dot net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#define GL_GLEXT_PROTOTYPES
#include "bcresources.h"
#include "bcsignals.h"
#include "bcsynchronous.h"
#include "bcwindowbase.h"
#include "condition.h"
#include "mutex.h"
#ifdef HAVE_GL
#include <GL/gl.h>
#endif
#include <string.h>
#include <unistd.h>
#include <string.h>
TextureID::TextureID(int window_id, int id, int w, int h, int components)
{
this->window_id = window_id;
this->id = id;
this->w = w;
this->h = h;
this->components = components;
in_use = 1;
}
ShaderID::ShaderID(int window_id, unsigned int handle, char *source)
{
this->window_id = window_id;
this->handle = handle;
this->source = strdup(source);
}
ShaderID::~ShaderID()
{
free(source);
}
#ifdef HAVE_GL
PBufferID::PBufferID(int window_id,
GLXPbuffer pbuffer,
GLXContext gl_context,
int w,
int h)
{
this->pbuffer = pbuffer;
this->gl_context = gl_context;
this->window_id = window_id;
this->w = w;
this->h = h;
in_use = 1;
}
#endif
BC_SynchronousCommand::BC_SynchronousCommand()
{
command = BC_SynchronousCommand::NONE;
frame = 0;
frame_return = 0;
result = 0;
command_done = new Condition(0, "BC_SynchronousCommand::command_done", 0);
}
BC_SynchronousCommand::~BC_SynchronousCommand()
{
delete command_done;
}
void BC_SynchronousCommand::copy_from(BC_SynchronousCommand *command)
{
this->command = command->command;
this->colormodel = command->colormodel;
this->window = command->window;
this->frame = command->frame;
this->window_id = command->window_id;
this->frame_return = command->frame_return;
this->id = command->id;
this->w = command->w;
this->h = command->h;
}
BC_Synchronous::BC_Synchronous()
: Thread(1, 0, 0)
{
next_command = new Condition(0, "BC_Synchronous::next_command", 0);
command_lock = new Mutex("BC_Synchronous::command_lock");
table_lock = new Mutex("BC_Synchronous::table_lock");
done = 0;
is_running = 0;
current_window = 0;
process_group = setpgid(getpid(), 0);
BC_WindowBase::get_resources()->set_synchronous(this);
}
BC_Synchronous::~BC_Synchronous()
{
commands.remove_all_objects();
}
BC_SynchronousCommand* BC_Synchronous::new_command()
{
return new BC_SynchronousCommand;
}
void BC_Synchronous::create_objects()
{
}
void BC_Synchronous::start()
{
run();
}
void BC_Synchronous::quit()
{
command_lock->lock("BC_Synchronous::quit");
BC_SynchronousCommand *command = new_command();
commands.append(command);
command->command = BC_SynchronousCommand::QUIT;
command_lock->unlock();
next_command->unlock();
}
int BC_Synchronous::send_command(BC_SynchronousCommand *command)
{
command_lock->lock("BC_Synchronous::send_command");
BC_SynchronousCommand *command2 = new_command();
commands.append(command2);
command2->copy_from(command);
command_lock->unlock();
next_command->unlock();
//printf("BC_Synchronous::send_command 1 %d\n", next_command->get_value());
// Wait for completion
command2->command_done->lock("BC_Synchronous::send_command");
int result = command2->result;
delete command2;
return result;
}
void BC_Synchronous::run()
{
is_running = 1;
while(!done)
{
next_command->lock("BC_Synchronous::run");
command_lock->lock("BC_Synchronous::run");
BC_SynchronousCommand *command = 0;
if(commands.total)
{
command = commands.values[0];
commands.remove_number(0);
}
// Prevent executing the same command twice if spurious unlock.
command_lock->unlock();
//printf("BC_Synchronous::run %d\n", command->command);
handle_command_base(command);
// delete command;
}
is_running = 0;
killpg(process_group, SIGUSR1);
}
void BC_Synchronous::handle_command_base(BC_SynchronousCommand *command)
{
if(command)
{
//printf("BC_Synchronous::handle_command_base 1 %d\n", command->command);
switch(command->command)
{
case BC_SynchronousCommand::QUIT:
done = 1;
break;
default:
handle_command(command);
break;
}
}
handle_garbage();
if(command)
{
command->command_done->unlock();
}
}
void BC_Synchronous::handle_command(BC_SynchronousCommand *command)
{
}
void BC_Synchronous::handle_garbage()
{
while(1)
{
table_lock->lock("BC_Synchronous::handle_garbage");
if(!garbage.total)
{
table_lock->unlock();
return;
}
BC_SynchronousCommand *command = garbage.values[0];
garbage.remove_number(0);
table_lock->unlock();
switch(command->command)
{
case BC_SynchronousCommand::DELETE_WINDOW:
delete_window_sync(command);
break;
case BC_SynchronousCommand::DELETE_PIXMAP:
delete_pixmap_sync(command);
break;
}
delete command;
}
}
void BC_Synchronous::put_texture(int id, int w, int h, int components)
{
if(id >= 0)
{
table_lock->lock("BC_Resources::put_texture");
// Search for duplicate
for(int i = 0; i < texture_ids.total; i++)
{
TextureID *ptr = texture_ids.values[i];
if(ptr->window_id == current_window->get_id() &&
ptr->id == id)
{
printf("BC_Synchronous::push_texture: texture exists\n"
"exists: window=%d id=%d w=%d h=%d\n"
"new: window=%d id=%d w=%d h=%d\n",
ptr->window_id,
ptr->id,
ptr->w,
ptr->h,
current_window->get_id(),
id,
w,
h);
table_lock->unlock();
return;
}
}
TextureID *new_id = new TextureID(current_window->get_id(),
id,
w, <|fim▁hole|> texture_ids.append(new_id);
table_lock->unlock();
}
}
int BC_Synchronous::get_texture(int w, int h, int components)
{
table_lock->lock("BC_Resources::get_texture");
for(int i = 0; i < texture_ids.total; i++)
{
if(texture_ids.values[i]->w == w &&
texture_ids.values[i]->h == h &&
texture_ids.values[i]->components == components &&
!texture_ids.values[i]->in_use &&
texture_ids.values[i]->window_id == current_window->get_id())
{
int result = texture_ids.values[i]->id;
texture_ids.values[i]->in_use = 1;
table_lock->unlock();
return result;
}
}
table_lock->unlock();
return -1;
}
void BC_Synchronous::release_texture(int window_id, int id)
{
table_lock->lock("BC_Resources::release_texture");
for(int i = 0; i < texture_ids.total; i++)
{
if(texture_ids.values[i]->id == id &&
texture_ids.values[i]->window_id == window_id)
{
texture_ids.values[i]->in_use = 0;
table_lock->unlock();
return;
}
}
table_lock->unlock();
}
unsigned int BC_Synchronous::get_shader(char *source, int *got_it)
{
table_lock->lock("BC_Resources::get_shader");
for(int i = 0; i < shader_ids.total; i++)
{
if(shader_ids.values[i]->window_id == current_window->get_id() &&
!strcmp(shader_ids.values[i]->source, source))
{
unsigned int result = shader_ids.values[i]->handle;
table_lock->unlock();
*got_it = 1;
return result;
}
}
table_lock->unlock();
*got_it = 0;
return 0;
}
void BC_Synchronous::put_shader(unsigned int handle,
char *source)
{
table_lock->lock("BC_Resources::put_shader");
shader_ids.append(new ShaderID(current_window->get_id(), handle, source));
table_lock->unlock();
}
void BC_Synchronous::dump_shader(unsigned int handle)
{
int got_it = 0;
table_lock->lock("BC_Resources::dump_shader");
for(int i = 0; i < shader_ids.total; i++)
{
if(shader_ids.values[i]->handle == handle)
{
printf("BC_Synchronous::dump_shader\n"
"%s", shader_ids.values[i]->source);
got_it = 1;
break;
}
}
table_lock->unlock();
if(!got_it) printf("BC_Synchronous::dump_shader couldn't find %d\n", handle);
}
void BC_Synchronous::delete_window(BC_WindowBase *window)
{
#ifdef HAVE_GL
BC_SynchronousCommand *command = new_command();
command->command = BC_SynchronousCommand::DELETE_WINDOW;
command->window_id = window->get_id();
command->display = window->get_display();
command->win = window->win;
command->gl_context = window->gl_win_context;
send_garbage(command);
#endif
}
void BC_Synchronous::delete_window_sync(BC_SynchronousCommand *command)
{
#ifdef HAVE_GL
int window_id = command->window_id;
Display *display = command->display;
Window win = command->win;
GLXContext gl_context = command->gl_context;
int debug = 0;
// texture ID's are unique to different contexts
glXMakeCurrent(display,
win,
gl_context);
table_lock->lock("BC_Resources::release_textures");
for(int i = 0; i < texture_ids.total; i++)
{
if(texture_ids.values[i]->window_id == window_id)
{
GLuint id = texture_ids.values[i]->id;
glDeleteTextures(1, &id);
if(debug)
printf("BC_Synchronous::delete_window_sync texture_id=%d window_id=%d\n",
id,
window_id);
texture_ids.remove_object_number(i);
i--;
}
}
for(int i = 0; i < shader_ids.total; i++)
{
if(shader_ids.values[i]->window_id == window_id)
{
glDeleteShader(shader_ids.values[i]->handle);
if(debug)
printf("BC_Synchronous::delete_window_sync shader_id=%d window_id=%d\n",
shader_ids.values[i]->handle,
window_id);
shader_ids.remove_object_number(i);
i--;
}
}
for(int i = 0; i < pbuffer_ids.total; i++)
{
if(pbuffer_ids.values[i]->window_id == window_id)
{
glXDestroyPbuffer(display, pbuffer_ids.values[i]->pbuffer);
glXDestroyContext(display, pbuffer_ids.values[i]->gl_context);
if(debug)
printf("BC_Synchronous::delete_window_sync pbuffer_id=%p window_id=%d\n",
pbuffer_ids.values[i]->pbuffer,
window_id);
pbuffer_ids.remove_object_number(i);
i--;
}
}
table_lock->unlock();
XDestroyWindow(display, win);
if(gl_context) glXDestroyContext(display, gl_context);
#endif
}
#ifdef HAVE_GL
void BC_Synchronous::put_pbuffer(int w,
int h,
GLXPbuffer pbuffer,
GLXContext gl_context)
{
int exists = 0;
table_lock->lock("BC_Resources::release_textures");
for(int i = 0; i < pbuffer_ids.total; i++)
{
PBufferID *ptr = pbuffer_ids.values[i];
if(ptr->w == w &&
ptr->h == h &&
ptr->pbuffer == pbuffer)
{
// Exists
exists = 1;
break;
}
}
if(!exists)
{
PBufferID *ptr = new PBufferID(current_window->get_id(),
pbuffer,
gl_context,
w,
h);
pbuffer_ids.append(ptr);
}
table_lock->unlock();
}
GLXPbuffer BC_Synchronous::get_pbuffer(int w,
int h,
int *window_id,
GLXContext *gl_context)
{
table_lock->lock("BC_Resources::release_textures");
for(int i = 0; i < pbuffer_ids.total; i++)
{
PBufferID *ptr = pbuffer_ids.values[i];
if(ptr->w == w &&
ptr->h == h &&
ptr->window_id == current_window->get_id() &&
!ptr->in_use)
{
GLXPbuffer result = ptr->pbuffer;
*gl_context = ptr->gl_context;
*window_id = ptr->window_id;
ptr->in_use = 1;
table_lock->unlock();
return result;
}
}
table_lock->unlock();
return 0;
}
void BC_Synchronous::release_pbuffer(int window_id, GLXPbuffer pbuffer)
{
table_lock->lock("BC_Resources::release_textures");
for(int i = 0; i < pbuffer_ids.total; i++)
{
PBufferID *ptr = pbuffer_ids.values[i];
if(ptr->window_id == window_id)
{
ptr->in_use = 0;
}
}
table_lock->unlock();
}
void BC_Synchronous::delete_pixmap(BC_WindowBase *window,
GLXPixmap pixmap,
GLXContext context)
{
BC_SynchronousCommand *command = new_command();
command->command = BC_SynchronousCommand::DELETE_PIXMAP;
command->window_id = window->get_id();
command->display = window->get_display();
command->win = window->win;
command->gl_pixmap = pixmap;
command->gl_context = context;
send_garbage(command);
}
#endif
void BC_Synchronous::delete_pixmap_sync(BC_SynchronousCommand *command)
{
#ifdef HAVE_GL
Display *display = command->display;
Window win = command->win;
glXMakeCurrent(display,
win,
command->gl_context);
glXDestroyContext(display, command->gl_context);
glXDestroyGLXPixmap(display, command->gl_pixmap);
#endif
}
void BC_Synchronous::send_garbage(BC_SynchronousCommand *command)
{
table_lock->lock("BC_Synchronous::delete_window");
garbage.append(command);
table_lock->unlock();
next_command->unlock();
}
BC_WindowBase* BC_Synchronous::get_window()
{
return current_window;
}<|fim▁end|> | h,
components); |
<|file_name|>addresses.ts<|end_file_name|><|fim▁begin|>export class Address {
main: string;
suite: string;
country: string;
region: string;
province: string;
postalCode: string;
type: string;<|fim▁hole|><|fim▁end|> | } |
<|file_name|>octopi_cachecleaner_hi.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="hi" version="2.0">
<context>
<name>CacheCleaner</name>
<message>
<location filename="Projects/octopi/cachecleaner/ui/cachecleaner.ui" line="14"/>
<source>Cache Cleaner - Octopi</source>
<translation>कैश हटाने हेतु साधन - ऑक्टोपी</translation>
</message><|fim▁hole|> <location filename="Projects/octopi/cachecleaner/ui/cachecleaner.ui" line="49"/>
<source>Uninstalled packages</source>
<translation>हटाए गए पैकेज</translation>
</message>
<message>
<location filename="Projects/octopi/cachecleaner/ui/cachecleaner.ui" line="75"/>
<location filename="Projects/octopi/cachecleaner/ui/cachecleaner.ui" line="150"/>
<source>Keep :</source>
<translation>रखें :</translation>
</message>
<message>
<location filename="Projects/octopi/cachecleaner/ui/cachecleaner.ui" line="82"/>
<location filename="Projects/octopi/cachecleaner/ui/cachecleaner.ui" line="157"/>
<source>Number of old versions to keep</source>
<translation>रखने हेतु पुराने पैकेज की संस्करण संख्या</translation>
</message>
<message>
<location filename="Projects/octopi/cachecleaner/ui/cachecleaner.ui" line="102"/>
<location filename="Projects/octopi/cachecleaner/ui/cachecleaner.ui" line="183"/>
<source>Refresh</source>
<translation> रिफ्रेश करें</translation>
</message>
<message>
<location filename="Projects/octopi/cachecleaner/ui/cachecleaner.ui" line="127"/>
<source>Installed packages</source>
<translation>इंस्टॉल हो रखें पैकेज</translation>
</message>
</context>
<context>
<name>PackageGroupModel</name>
<message>
<location filename="Projects/octopi/cachecleaner/packagegroupmodel.cpp" line="199"/>
<source>Clean</source>
<translation>हटाएँ</translation>
</message>
<message>
<location filename="Projects/octopi/cachecleaner/packagegroupmodel.cpp" line="222"/>
<source>Clean %1</source>
<translation>%1 हटाएँ</translation>
</message>
</context>
</TS><|fim▁end|> | <message> |
<|file_name|>global-style.js<|end_file_name|><|fim▁begin|>import { createGlobalStyle } from 'styled-components';
export const GlobalStyle = createGlobalStyle`
body {
margin: 0;
font-family: 'Montserrat', sans-serif;
}<|fim▁hole|> }
`;<|fim▁end|> |
* {
box-sizing: border-box; |
<|file_name|>demo.service.ts<|end_file_name|><|fim▁begin|>import { Injectable } from "@angular/core";
@Injectable()
<|fim▁hole|> sayHello(name:string) {
return "Hello " + name;
}
}<|fim▁end|> | export class DemoService { |
<|file_name|>File_Riff_Elements.cpp<|end_file_name|><|fim▁begin|>/* Copyright (c) MediaArea.net SARL. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license that can
* be found in the License.html file in the root of the source tree.
*/
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//
// Elements part
//
// Contributor: Lionel Duchateau, [email protected]
//
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//---------------------------------------------------------------------------
// Pre-compilation
#include "MediaInfo/PreComp.h"
#ifdef __BORLANDC__
#pragma hdrstop
#endif
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
#include "MediaInfo/Setup.h"
#include <ZenLib/Ztring.h>
#include <string>
using namespace std;
using namespace ZenLib;
//---------------------------------------------------------------------------
//***************************************************************************
// Infos
//***************************************************************************
//---------------------------------------------------------------------------
#if defined(MEDIAINFO_RIFF_YES) || defined(MEDIAINFO_MK_YES)
//---------------------------------------------------------------------------
namespace MediaInfoLib
{
//---------------------------------------------------------------------------
std::string ExtensibleWave_ChannelMask (int32u ChannelMask)
{
std::string Text;
if ((ChannelMask&0x0007)!=0x0000)
Text+="Front:";
if (ChannelMask&0x0001)
Text+=" L";
if (ChannelMask&0x0004)
Text+=" C";
if (ChannelMask&0x0002)
Text+=" R";
if ((ChannelMask&0x0600)!=0x0000)
Text+=", Side:";
if (ChannelMask&0x0200)
Text+=" L";
if (ChannelMask&0x0400)
Text+=" R";
if ((ChannelMask&0x0130)!=0x0000)
Text+=", Back:";
if (ChannelMask&0x0010)
Text+=" L";
if (ChannelMask&0x0100)
Text+=" C";
if (ChannelMask&0x0020)
Text+=" R";
if ((ChannelMask&0x0008)!=0x0000)
Text+=", LFE";
return Text;
}
//---------------------------------------------------------------------------
std::string ExtensibleWave_ChannelMask2 (int32u ChannelMask)
{
std::string Text;
int8u Count=0;
if (ChannelMask&0x0001)
Count++;
if (ChannelMask&0x0004)
Count++;
if (ChannelMask&0x0002)
Count++;
Text+=Ztring::ToZtring(Count).To_UTF8();
Count=0;
if (ChannelMask&0x0200)
Count++;
if (ChannelMask&0x0400)
Count++;
Text+="/"+Ztring::ToZtring(Count).To_UTF8();
Count=0;
if (ChannelMask&0x0010)
Count++;
if (ChannelMask&0x0100)
Count++;
if (ChannelMask&0x0020)
Count++;
Text+="/"+Ztring::ToZtring(Count).To_UTF8();
Count=0;
if (ChannelMask&0x0008)
Text+=".1";
return Text;
}
}
//---------------------------------------------------------------------------
#endif
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
#ifdef MEDIAINFO_RIFF_YES
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
#include "MediaInfo/Multiple/File_Riff.h"
#if defined(MEDIAINFO_DVDIF_YES)
#include "MediaInfo/Multiple/File_DvDif.h"
#endif
#if defined(MEDIAINFO_OGG_YES)
#include "MediaInfo/Multiple/File_Ogg.h"
#include "MediaInfo/Multiple/File_Ogg_SubElement.h"
#endif
#if defined(MEDIAINFO_FFV1_YES)
#include "MediaInfo/Video/File_Ffv1.h"
#endif
#if defined(MEDIAINFO_HUFFYUV_YES)
#include "MediaInfo/Video/File_HuffYuv.h"
#endif
#if defined(MEDIAINFO_MPEG4V_YES)
#include "MediaInfo/Video/File_Mpeg4v.h"
#endif
#if defined(MEDIAINFO_MPEGV_YES)
#include "MediaInfo/Video/File_Mpegv.h"
#endif
#if defined(MEDIAINFO_PRORES_YES)
#include "MediaInfo/Video/File_ProRes.h"
#endif
#if defined(MEDIAINFO_AVC_YES)
#include "MediaInfo/Video/File_Avc.h"
#endif
#if defined(MEDIAINFO_CANOPUS_YES)
#include "MediaInfo/Video/File_Canopus.h"
#endif
#if defined(MEDIAINFO_FRAPS_YES)
#include "MediaInfo/Video/File_Fraps.h"
#endif
#if defined(MEDIAINFO_LAGARITH_YES)
#include "MediaInfo/Video/File_Lagarith.h"
#endif
#if defined(MEDIAINFO_MPEGA_YES)
#include "MediaInfo/Audio/File_Mpega.h"
#endif
#if defined(MEDIAINFO_AAC_YES)
#include "MediaInfo/Audio/File_Aac.h"
#endif
#if defined(MEDIAINFO_AC3_YES)
#include "MediaInfo/Audio/File_Ac3.h"
#endif
#if defined(MEDIAINFO_DTS_YES)
#include "MediaInfo/Audio/File_Dts.h"
#endif
#if defined(MEDIAINFO_JPEG_YES)
#include "MediaInfo/Image/File_Jpeg.h"
#endif
#if defined(MEDIAINFO_SUBRIP_YES)
#include "MediaInfo/Text/File_SubRip.h"
#endif
#if defined(MEDIAINFO_OTHERTEXT_YES)
#include "MediaInfo/Text/File_OtherText.h"
#endif
#if defined(MEDIAINFO_ADPCM_YES)
#include "MediaInfo/Audio/File_Adpcm.h"
#endif
#if defined(MEDIAINFO_PCM_YES)
#include "MediaInfo/Audio/File_Pcm.h"
#endif
#if defined(MEDIAINFO_SMPTEST0337_YES)
#include "MediaInfo/Audio/File_SmpteSt0337.h"
#endif
#if defined(MEDIAINFO_ID3_YES)
#include "MediaInfo/Tag/File_Id3.h"
#endif
#if defined(MEDIAINFO_ID3V2_YES)
#include "MediaInfo/Tag/File_Id3v2.h"
#endif
#if defined(MEDIAINFO_GXF_YES)
#if defined(MEDIAINFO_CDP_YES)
#include "MediaInfo/Text/File_Cdp.h"
#include <cstring>
#endif
#endif //MEDIAINFO_GXF_YES
#include <vector>
#include "MediaInfo/MediaInfo_Config_MediaInfo.h"
using namespace std;
//---------------------------------------------------------------------------
namespace MediaInfoLib
{
//***************************************************************************
// Const
//***************************************************************************
namespace Elements
{
const int32u FORM=0x464F524D;
const int32u LIST=0x4C495354;
const int32u ON2_=0x4F4E3220;
const int32u RIFF=0x52494646;
const int32u RF64=0x52463634;
const int32u AIFC=0x41494643;
const int32u AIFC_COMM=0x434F4D4D;
const int32u AIFC_COMT=0x434F4D54;
const int32u AIFC_FVER=0x46564552;
const int32u AIFC_SSND=0x53534E44;
const int32u AIFF=0x41494646;
const int32u AIFF_COMM=0x434F4D4D;
const int32u AIFF_COMT=0x434F4D54;
const int32u AIFF_SSND=0x53534E44;
const int32u AIFF__c__=0x28632920;
const int32u AIFF_ANNO=0x414E4E4F;
const int32u AIFF_AUTH=0x41555448;
const int32u AIFF_NAME=0x4E414D45;
const int32u AIFF_ID3_=0x49443320;
const int32u AVI_=0x41564920;
const int32u AVI__cset=0x63736574;
const int32u AVI__Cr8r=0x43723872;
const int32u AVI__exif=0x65786966;
const int32u AVI__exif_ecor=0x65636F72;
const int32u AVI__exif_emdl=0x656D646C;
const int32u AVI__exif_emnt=0x656D6E74;
const int32u AVI__exif_erel=0x6572656C;
const int32u AVI__exif_etim=0x6574696D;
const int32u AVI__exif_eucm=0x6575636D;
const int32u AVI__exif_ever=0x65766572;
const int32u AVI__goog=0x676F6F67;
const int32u AVI__goog_GDAT=0x47444154;
const int32u AVI__GMET=0x474D4554;
const int32u AVI__hdlr=0x6864726C;
const int32u AVI__hdlr_avih=0x61766968;
const int32u AVI__hdlr_JUNK=0x4A554E4B;
const int32u AVI__hdlr_strl=0x7374726C;
const int32u AVI__hdlr_strl_indx=0x696E6478;
const int32u AVI__hdlr_strl_JUNK=0x4A554E4B;
const int32u AVI__hdlr_strl_strd=0x73747264;
const int32u AVI__hdlr_strl_strf=0x73747266;
const int32u AVI__hdlr_strl_strh=0x73747268;
const int32u AVI__hdlr_strl_strh_auds=0x61756473;
const int32u AVI__hdlr_strl_strh_iavs=0x69617673;
const int32u AVI__hdlr_strl_strh_mids=0x6D696473;
const int32u AVI__hdlr_strl_strh_vids=0x76696473;
const int32u AVI__hdlr_strl_strh_txts=0x74787473;
const int32u AVI__hdlr_strl_strn=0x7374726E;
const int32u AVI__hdlr_strl_vprp=0x76707270;
const int32u AVI__hdlr_odml=0x6F646D6C;
const int32u AVI__hdlr_odml_dmlh=0x646D6C68;
const int32u AVI__hdlr_ON2h=0x4F4E3268;
const int32u AVI__idx1=0x69647831;
const int32u AVI__INFO=0x494E464F;
const int32u AVI__INFO_IARL=0x4941524C;
const int32u AVI__INFO_IART=0x49415254;
const int32u AVI__INFO_IAS1=0x49415331;
const int32u AVI__INFO_IAS2=0x49415332;
const int32u AVI__INFO_IAS3=0x49415333;
const int32u AVI__INFO_IAS4=0x49415334;
const int32u AVI__INFO_IAS5=0x49415335;
const int32u AVI__INFO_IAS6=0x49415336;
const int32u AVI__INFO_IAS7=0x49415337;
const int32u AVI__INFO_IAS8=0x49415338;
const int32u AVI__INFO_IAS9=0x49415339;
const int32u AVI__INFO_ICDS=0x49434453;
const int32u AVI__INFO_ICMS=0x49434D53;
const int32u AVI__INFO_ICMT=0x49434D54;
const int32u AVI__INFO_ICNT=0x49434E54;
const int32u AVI__INFO_ICOP=0x49434F50;
const int32u AVI__INFO_ICNM=0x49434E4D;
const int32u AVI__INFO_ICRD=0x49435244;
const int32u AVI__INFO_ICRP=0x49435250;
const int32u AVI__INFO_IDIM=0x4944494D;
const int32u AVI__INFO_IDIT=0x49444954;
const int32u AVI__INFO_IDPI=0x49445049;
const int32u AVI__INFO_IDST=0x49445354;
const int32u AVI__INFO_IEDT=0x49454454;
const int32u AVI__INFO_IENG=0x49454E47;
const int32u AVI__INFO_IFRM=0x4946524D;
const int32u AVI__INFO_IGNR=0x49474E52;
const int32u AVI__INFO_IID3=0x49494433;
const int32u AVI__INFO_IKEY=0x494B4559;
const int32u AVI__INFO_ILGT=0x494C4754;
const int32u AVI__INFO_ILNG=0x494C4E47;
const int32u AVI__INFO_ILYC=0x494C5943;
const int32u AVI__INFO_IMED=0x494D4544;
const int32u AVI__INFO_IMP3=0x494D5033;
const int32u AVI__INFO_IMUS=0x494D5553;
const int32u AVI__INFO_INAM=0x494E414D;
const int32u AVI__INFO_IPLT=0x49504C54;
const int32u AVI__INFO_IPDS=0x49504453;
const int32u AVI__INFO_IPRD=0x49505244;
const int32u AVI__INFO_IPRT=0x49505254;
const int32u AVI__INFO_IPRO=0x4950524F;
const int32u AVI__INFO_IRTD=0x49525444;
const int32u AVI__INFO_ISBJ=0x4953424A;
const int32u AVI__INFO_ISGN=0x4953474E;
const int32u AVI__INFO_ISTD=0x49535444;
const int32u AVI__INFO_ISTR=0x49535452;
const int32u AVI__INFO_ISFT=0x49534654;
const int32u AVI__INFO_ISHP=0x49534850;
const int32u AVI__INFO_ISMP=0x49534D50;
const int32u AVI__INFO_ISRC=0x49535243;
const int32u AVI__INFO_ISRF=0x49535246;
const int32u AVI__INFO_ITCH=0x49544348;
const int32u AVI__INFO_IWEB=0x49574542;
const int32u AVI__INFO_IWRI=0x49575249;
const int32u AVI__INFO_JUNK=0x4A554E4B;
const int32u AVI__JUNK=0x4A554E4B;
const int32u AVI__MD5_=0x4D443520;
const int32u AVI__movi=0x6D6F7669;
const int32u AVI__movi_rec_=0x72656320;
const int32u AVI__movi_xxxx_____=0x00005F5F;
const int32u AVI__movi_xxxx___db=0x00006462;
const int32u AVI__movi_xxxx___dc=0x00006463;
const int32u AVI__movi_xxxx___sb=0x00007362;
const int32u AVI__movi_xxxx___tx=0x00007478;
const int32u AVI__movi_xxxx___wb=0x00007762;
const int32u AVI__PrmA=0x50726D41;
const int32u AVI__Tdat=0x54646174;
const int32u AVI__Tdat_rn_A=0x726E5F41;
const int32u AVI__Tdat_rn_O=0x726E5F4F;
const int32u AVI__Tdat_tc_A=0x74635F41;
const int32u AVI__Tdat_tc_O=0x74635F4F;
const int32u AVIX=0x41564958;
const int32u AVIX_idx1=0x69647831;
const int32u AVIX_movi=0x6D6F7669;
const int32u AVIX_movi_rec_=0x72656320;
const int32u CADP=0x43414450;
const int32u CDDA=0x43444441;
const int32u CDDA_fmt_=0x666D7420;
const int32u CMJP=0x434D4A50;
const int32u CMP4=0x434D5034;
const int32u IDVX=0x49445658;
const int32u INDX=0x494E4458;
const int32u JUNK=0x4A554E4B;
const int32u menu=0x6D656E75;
const int32u MThd=0x4D546864;
const int32u MTrk=0x4D54726B;
const int32u PAL_=0x50414C20;
const int32u QLCM=0x514C434D;
const int32u QLCM_fmt_=0x666D7420;
const int32u rcrd=0x72637264;
const int32u rcrd_desc=0x64657363;
const int32u rcrd_fld_=0x666C6420;
const int32u rcrd_fld__anc_=0x616E6320;
const int32u rcrd_fld__anc__pos_=0x706F7320;
const int32u rcrd_fld__anc__pyld=0x70796C64;
const int32u rcrd_fld__finf=0x66696E66;
const int32u RDIB=0x52444942;
const int32u RMID=0x524D4944;
const int32u RMMP=0x524D4D50;
const int32u RMP3=0x524D5033;
const int32u RMP3_data=0x64617461;
const int32u RMP3_INFO=0x494E464F;
const int32u RMP3_INFO_IID3=0x49494433;
const int32u RMP3_INFO_ILYC=0x494C5943;
const int32u RMP3_INFO_IMP3=0x494D5033;
const int32u RMP3_INFO_JUNK=0x4A554E4B;
const int32u SMV0=0x534D5630;
const int32u SMV0_xxxx=0x534D563A;
const int32u WAVE=0x57415645;
const int32u WAVE__pmx=0x20786D70;
const int32u WAVE_aXML=0x61584D4C;
const int32u WAVE_bext=0x62657874;
const int32u WAVE_cue_=0x63756520;
const int32u WAVE_data=0x64617461;
const int32u WAVE_ds64=0x64733634;
const int32u WAVE_fact=0x66616374;
const int32u WAVE_fmt_=0x666D7420;
const int32u WAVE_ID3_=0x49443320;
const int32u WAVE_id3_=0x69643320;
const int32u WAVE_INFO=0x494E464F;
const int32u WAVE_iXML=0x69584D4C;
const int32u wave=0x77617665;
const int32u wave_data=0x64617461;
const int32u wave_fmt_=0x666D7420;
const int32u W3DI=0x57334449;
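//The UUID macro below splits each 16-byte codec GUID into two 64-bit constants:
//NAME packs PART3.PART2.PART1 (the first eight GUID bytes as they are stored little-endian in the file)
//and NAME##2 packs PART4.PART5 (the remaining eight bytes); used for the QLCM codec GUIDs that follow.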
#define UUID(NAME, PART1, PART2, PART3, PART4, PART5) \
const int32u NAME =0x##PART3##PART2##PART1##ULL; \
const int64u NAME##2=0x##PART4##PART5##ULL;
UUID(QLCM_QCELP1, 5E7F6D41, B115, 11D0, BA91, 00805FB4B97E)
UUID(QLCM_QCELP2, 5E7F6D42, B115, 11D0, BA91, 00805FB4B97E)
UUID(QLCM_EVRC, E689D48D, 9076, 46B5, 91EF, 736A5100CEB4)
UUID(QLCM_SMV, 8D7C2B75, A797, ED49, 985E, D53C8CC75F84)
}
//***************************************************************************
// Format
//***************************************************************************
//---------------------------------------------------------------------------
void File_Riff::Data_Parse()
{
//Alignement specific
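//RIFF chunks are word-aligned: a chunk with an odd size is followed by one pad byte.
//Alignement_ExtraByte holds that pad so it is excluded while parsing the payload and skipped again at the end of Data_Parse().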
Element_Size-=Alignement_ExtraByte;
DATA_BEGIN
LIST(AIFC)
ATOM_BEGIN
ATOM(AIFC_COMM)
ATOM(AIFC_COMT)
ATOM(AIFC_FVER)
ATOM(AIFC_SSND)
ATOM_DEFAULT(AIFC_xxxx)
ATOM_END_DEFAULT
LIST(AIFF)
ATOM_BEGIN
ATOM(AIFF_COMM)
ATOM(AIFF_COMT)
ATOM(AIFF_ID3_)
LIST_SKIP(AIFF_SSND)
ATOM_DEFAULT(AIFF_xxxx)
ATOM_END_DEFAULT
LIST(AVI_)
ATOM_BEGIN
ATOM(AVI__Cr8r);
ATOM(AVI__cset)
LIST(AVI__exif)
ATOM_DEFAULT_ALONE(AVI__exif_xxxx)
LIST(AVI__goog)
ATOM_BEGIN
ATOM(AVI__goog_GDAT)
ATOM_END
ATOM(AVI__GMET)
LIST(AVI__hdlr)
ATOM_BEGIN
ATOM(AVI__hdlr_avih)
ATOM(AVI__hdlr_JUNK)
LIST(AVI__hdlr_strl)
ATOM_BEGIN
ATOM(AVI__hdlr_strl_indx)
ATOM(AVI__hdlr_strl_JUNK)
ATOM(AVI__hdlr_strl_strd)
ATOM(AVI__hdlr_strl_strf)
ATOM(AVI__hdlr_strl_strh)
ATOM(AVI__hdlr_strl_strn)
ATOM(AVI__hdlr_strl_vprp)
ATOM_END
LIST(AVI__hdlr_odml)
ATOM_BEGIN
ATOM(AVI__hdlr_odml_dmlh)
ATOM_END
ATOM(AVI__hdlr_ON2h)
LIST(AVI__INFO)
ATOM_BEGIN
ATOM(AVI__INFO_IID3)
ATOM(AVI__INFO_ILYC)
ATOM(AVI__INFO_IMP3)
ATOM(AVI__INFO_JUNK)
ATOM_DEFAULT(AVI__INFO_xxxx)
ATOM_END_DEFAULT
ATOM_DEFAULT(AVI__hdlr_xxxx)
ATOM_END_DEFAULT
ATOM(AVI__idx1)
LIST(AVI__INFO)
ATOM_BEGIN
ATOM(AVI__INFO_IID3)
ATOM(AVI__INFO_ILYC)
ATOM(AVI__INFO_IMP3)
ATOM(AVI__INFO_JUNK)
ATOM_DEFAULT(AVI__INFO_xxxx)
ATOM_END_DEFAULT
ATOM(AVI__JUNK)
ATOM(AVI__MD5_)
LIST(AVI__movi)
ATOM_BEGIN
LIST(AVI__movi_rec_)
ATOM_DEFAULT_ALONE(AVI__movi_xxxx)
ATOM_DEFAULT(AVI__movi_xxxx)
ATOM_END_DEFAULT
ATOM(AVI__PrmA);
LIST(AVI__Tdat)
ATOM_BEGIN
ATOM(AVI__Tdat_rn_A)
ATOM(AVI__Tdat_rn_O)
ATOM(AVI__Tdat_tc_A)
ATOM(AVI__Tdat_tc_O)
ATOM_END
ATOM_DEFAULT(AVI__xxxx)
ATOM_END_DEFAULT
LIST(AVIX) //OpenDML
ATOM_BEGIN
ATOM(AVIX_idx1)
LIST(AVIX_movi)
ATOM_BEGIN
LIST(AVIX_movi_rec_)
ATOM_DEFAULT_ALONE(AVIX_movi_xxxx)
ATOM_DEFAULT(AVIX_movi_xxxx)
ATOM_END_DEFAULT
ATOM_END
ATOM_PARTIAL(CADP)
LIST(CDDA)
ATOM_BEGIN
ATOM(CDDA_fmt_)
ATOM_END
ATOM_PARTIAL(CMJP)
ATOM(CMP4)
ATOM(IDVX)
LIST(INDX)
ATOM_DEFAULT_ALONE(INDX_xxxx)
LIST_SKIP(JUNK)
LIST_SKIP(menu)
ATOM(MThd)
LIST_SKIP(MTrk)
LIST_SKIP(PAL_)
LIST(QLCM)
ATOM_BEGIN
ATOM(QLCM_fmt_)
ATOM_END
#if defined(MEDIAINFO_GXF_YES)
LIST(rcrd)
ATOM_BEGIN
ATOM(rcrd_desc)
LIST(rcrd_fld_)
ATOM_BEGIN
LIST(rcrd_fld__anc_)
ATOM_BEGIN
ATOM(rcrd_fld__anc__pos_)
ATOM(rcrd_fld__anc__pyld)
ATOM_END
ATOM(rcrd_fld__finf)
ATOM_END
ATOM_END
#endif //defined(MEDIAINFO_GXF_YES)
LIST_SKIP(RDIB)
LIST_SKIP(RMID)
LIST_SKIP(RMMP)
LIST(RMP3)
ATOM_BEGIN
LIST(RMP3_data)
break;
LIST(RMP3_INFO)
ATOM_BEGIN
ATOM(RMP3_INFO_IID3)
ATOM(RMP3_INFO_ILYC)
ATOM(RMP3_INFO_IMP3)
ATOM(RMP3_INFO_JUNK)
ATOM_DEFAULT(RMP3_INFO_xxxx)
ATOM_END_DEFAULT
ATOM_END
ATOM(SMV0)
ATOM(SMV0_xxxx)
ATOM(W3DI)
LIST(WAVE)
ATOM_BEGIN
ATOM(WAVE__pmx)
ATOM(WAVE_aXML)
ATOM(WAVE_bext)
LIST(WAVE_data)
break;
ATOM(WAVE_cue_)
ATOM(WAVE_ds64)
ATOM(WAVE_fact)
ATOM(WAVE_fmt_)
ATOM(WAVE_ID3_)
ATOM(WAVE_id3_)
LIST(WAVE_INFO)
ATOM_DEFAULT_ALONE(WAVE_INFO_xxxx)
ATOM(WAVE_iXML)
ATOM_END
LIST(wave)
ATOM_BEGIN
LIST(wave_data)
break;
ATOM(wave_fmt_)
ATOM_END
DATA_END
if (Alignement_ExtraByte)
{
Element_Size+=Alignement_ExtraByte;
if (Element_Offset+Alignement_ExtraByte==Element_Size)
Skip_XX(Alignement_ExtraByte, "Alignement");
}
}
//***************************************************************************
// Elements
//***************************************************************************
//---------------------------------------------------------------------------
void File_Riff::AIFC()
{
Data_Accept("AIFF Compressed");
Element_Name("AIFF Compressed");
//Filling
Fill(Stream_General, 0, General_Format, "AIFF");
Stream_Prepare(Stream_Audio);
Kind=Kind_Aiff;
#if MEDIAINFO_EVENTS
StreamIDs_Width[0]=0;
#endif //MEDIAINFO_EVENTS
}
//---------------------------------------------------------------------------
void File_Riff::AIFC_COMM()
{
AIFF_COMM();
}
//---------------------------------------------------------------------------
void File_Riff::AIFC_COMT()
{
AIFF_COMT();
}
//---------------------------------------------------------------------------
void File_Riff::AIFC_FVER()
{
Element_Name("Format Version");
//Parsing
Skip_B4( "Version");
}
//---------------------------------------------------------------------------
void File_Riff::AIFC_SSND()
{
AIFF_SSND();
}
//---------------------------------------------------------------------------
void File_Riff::AIFC_xxxx()
{
AIFF_xxxx();
}
//---------------------------------------------------------------------------
void File_Riff::AIFF()
{
Data_Accept("AIFF");
Element_Name("AIFF");
//Filling
Fill(Stream_General, 0, General_Format, "AIFF");
Stream_Prepare(Stream_Audio);
Kind=Kind_Aiff;
#if MEDIAINFO_EVENTS
StreamIDs_Width[0]=0;
#endif //MEDIAINFO_EVENTS
}
//---------------------------------------------------------------------------
void File_Riff::AIFF_COMM()
{
Element_Name("Common");
int32u numSampleFrames;
int16u numChannels, sampleSize;
float80 sampleRate;
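//AIFF/AIFC "COMM" layout: numChannels (2 bytes), numSampleFrames (4), sampleSize (2),
//then the sample rate stored as an 80-bit IEEE extended float; AIFC additionally appends compressionType and a Pascal-string compressionName.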
//Parsing
Get_B2 (numChannels, "numChannels");
Get_B4 (numSampleFrames, "numSampleFrames");
Get_B2 (sampleSize, "sampleSize");
Get_BF10(sampleRate, "sampleRate");
if (Data_Remain()) //AIFC
{
int32u compressionType;
Get_C4 (compressionType, "compressionType");
Skip_PA( "compressionName");
//Filling
CodecID_Fill(Ztring().From_CC4(compressionType), Stream_Audio, StreamPos_Last, InfoCodecID_Format_Mpeg4);
Fill(Stream_Audio, StreamPos_Last, Audio_Codec, Ztring().From_CC4(compressionType));
}
else
{
//Filling
Fill(Stream_Audio, StreamPos_Last, Audio_Format, "PCM");
Fill(Stream_Audio, StreamPos_Last, Audio_Codec, "PCM");
}
//Filling
Fill(Stream_Audio, StreamPos_Last, Audio_Channel_s_, numChannels);
Fill(Stream_Audio, StreamPos_Last, Audio_BitDepth, sampleSize);
if (sampleRate)
Fill(Stream_Audio, StreamPos_Last, Audio_Duration, numSampleFrames/sampleRate*1000);
Fill(Stream_Audio, StreamPos_Last, Audio_SamplingRate, sampleRate, 0);
//Compute the current codec ID
Element_Code=(int64u)-1;
Stream_ID=(int32u)-1;
stream_Count=1;
//Specific cases
#if defined(MEDIAINFO_SMPTEST0337_YES)
if (Retrieve(Stream_Audio, 0, Audio_CodecID).empty() && numChannels==2 && sampleSize<=32 && sampleRate==48000) //Some SMPTE ST 337 streams are hidden in PCM stream
{
File_SmpteSt0337* Parser=new File_SmpteSt0337;
Parser->Endianness='B';
Parser->Container_Bits=(int8u)sampleSize;
Parser->ShouldContinueParsing=true;
#if MEDIAINFO_DEMUX
if (Config->Demux_Unpacketize_Get())
{
Parser->Demux_Level=2; //Container
Parser->Demux_UnpacketizeContainer=true;
Demux_Level=4; //Intermediate
}
#endif //MEDIAINFO_DEMUX
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
stream& StreamItem = Stream[Stream_ID];
#if defined(MEDIAINFO_PCM_YES)
File_Pcm* Parser=new File_Pcm;
Parser->Codec=Retrieve(Stream_Audio, StreamPos_Last, Audio_CodecID);
if (Parser->Codec.empty() || Parser->Codec==__T("NONE"))
Parser->Endianness='B';
Parser->BitDepth=(int8u)sampleSize;
#if MEDIAINFO_DEMUX
if (Demux_Rate)
Parser->Frame_Count_Valid = float64_int64s(Demux_Rate);
if (Config->Demux_Unpacketize_Get())
{
Parser->Demux_Level=2; //Container
Parser->Demux_UnpacketizeContainer=true;
Demux_Level=4; //Intermediate
}
#else //MEDIAINFO_DEMUX
Parser->Frame_Count_Valid=(int64u)-1; //Disabling it, waiting for SMPTE ST 337 parser reject
#endif //MEDIAINFO_DEMUX
StreamItem.Parsers.push_back(Parser);
StreamItem.IsPcm=true;
StreamItem.StreamKind=Stream_Audio;
#endif
#if MEDIAINFO_DEMUX
BlockAlign=numChannels*sampleSize/8;
AvgBytesPerSec=(int32u)float64_int64s(BlockAlign*(float64)sampleRate);
#endif //MEDIAINFO_DEMUX
Element_Code=(int64u)-1;
Open_Buffer_Init_All();
}
//---------------------------------------------------------------------------
void File_Riff::AIFF_COMT()
{
//Parsing
int16u numComments;
Get_B2(numComments, "numComments");
for (int16u Pos=0; Pos<numComments; Pos++)
{
Ztring text;
int16u count;
Element_Begin1("Comment");
Skip_B4( "timeStamp");
Skip_B4( "marker");
Get_B2 (count, "count");
count+=count%2; //count is padded to an even number of bytes
Get_Local(count, text, "text");
Element_End0();
//Filling
Fill(Stream_General, 0, General_Comment, text);
}
}
//---------------------------------------------------------------------------
void File_Riff::AIFF_SSND()
{
WAVE_data();
}
//---------------------------------------------------------------------------
void File_Riff::AIFF_SSND_Continue()
{
WAVE_data_Continue();
}
//---------------------------------------------------------------------------
void File_Riff::AIFF_xxxx()
{
#define ELEMENT_CASE(_ELEMENT, _NAME) \
case Elements::_ELEMENT : Element_Name(_NAME); Name=_NAME; break;
//Known?
std::string Name;
switch(Element_Code)
{
ELEMENT_CASE(AIFF__c__, "Copyright");
ELEMENT_CASE(AIFF_ANNO, "Comment");
ELEMENT_CASE(AIFF_AUTH, "Performer");
ELEMENT_CASE(AIFF_NAME, "Title");
default : Skip_XX(Element_Size, "Unknown");
return;
}
//Parsing
Ztring text;
Get_Local(Element_Size, text, "text");
//Filling
Fill(Stream_General, 0, Name.c_str(), text);
}
//---------------------------------------------------------------------------
void File_Riff::AVI_()
{
Element_Name("AVI");
//Test if there is only one AVI chunk
if (Status[IsAccepted])
{
Element_Info1("Problem: 2 AVI chunks, this is not normal");
Skip_XX(Element_TotalSize_Get(), "Data");
return;
}
Data_Accept("AVI");
//Filling
Fill(Stream_General, 0, General_Format, "AVI");
Kind=Kind_Avi;
//Configuration
Buffer_MaximumSize=64*1024*1024; //Some big frames are possible (e.g YUV 4:2:2 10 bits 1080p)
}
//---------------------------------------------------------------------------
void File_Riff::AVI__Cr8r()
{
Element_Name("Adobe Premiere Cr8r");
//Parsing
Skip_C4( "FourCC");
Skip_B4( "Size");
Skip_XX(Element_Size-Element_Offset, "Unknown");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__cset()
{
Element_Name("Regional settings");
//Parsing
Skip_L2( "CodePage"); //TODO: take a look about IBM/MS RIFF/MCI Specification 1.0
Skip_L2( "CountryCode");
Skip_L2( "LanguageCode");
Skip_L2( "Dialect");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__exif()
{
Element_Name("Exif (Exchangeable Image File Format)");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__exif_xxxx()
{
Element_Name("Value");
//Parsing
Ztring Value;
Get_Local(Element_Size, Value, "Value");
//Filling
switch (Element_Code)
{
case Elements::AVI__exif_ecor : Fill(Stream_General, 0, "Make", Value); break;
case Elements::AVI__exif_emdl : Fill(Stream_General, 0, "Model", Value); break;
case Elements::AVI__exif_emnt : Fill(Stream_General, 0, "MakerNotes", Value); break;
case Elements::AVI__exif_erel : Fill(Stream_General, 0, "RelatedImageFile", Value); break;
case Elements::AVI__exif_etim : Fill(Stream_General, 0, "Written_Date", Value); break;
case Elements::AVI__exif_eucm : Fill(Stream_General, 0, General_Comment, Value); break;
case Elements::AVI__exif_ever : break; //Exif version
default: Fill(Stream_General, 0, Ztring().From_CC4((int32u)Element_Code).To_Local().c_str(), Value);
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__goog()
{
Element_Name("Google specific");
//Filling
Fill(Stream_General, 0, General_Format, "Google Video", Unlimited, false, true);
}
//---------------------------------------------------------------------------
void File_Riff::AVI__goog_GDAT()
{
Element_Name("Google datas");
}
//---------------------------------------------------------------------------
// Google Metadata
//
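// The GMET payload is plain text: one "key:value" pair per line (title, description, url and docid are the keys mapped below).
//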
void File_Riff::AVI__GMET()
{
//Parsing
Ztring Value; Value.From_Local((const char*)(Buffer+Buffer_Offset+0), (size_t)Element_Size);
ZtringListList List;
List.Separator_Set(0, __T("\n"));
List.Separator_Set(1, __T(":"));
List.Max_Set(1, 2);
List.Write(Value);
//Details
#if MEDIAINFO_TRACE
if (Config_Trace_Level)
{
//for (size_t Pos=0; Pos<List.size(); Pos++)
// Details_Add_Info(Pos, List(Pos, 0).To_Local().c_str(), List(Pos, 1));
}
#endif //MEDIAINFO_TRACE
//Filling
for (size_t Pos=0; Pos<List.size(); Pos++)
{
if (List(Pos, 0)==__T("title")) Fill(Stream_General, 0, General_Title, List(Pos, 1));
if (List(Pos, 0)==__T("description")) Fill(Stream_General, 0, General_Title_More, List(Pos, 1));
if (List(Pos, 0)==__T("url")) Fill(Stream_General, 0, General_Title_Url, List(Pos, 1));
if (List(Pos, 0)==__T("docid")) Fill(Stream_General, 0, General_UniqueID, List(Pos, 1));
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr()
{
Element_Name("AVI Header");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_avih()
{
Element_Name("File header");
//Parsing
int32u MicrosecPerFrame, Flags;
Get_L4 (MicrosecPerFrame, "MicrosecPerFrame");
Skip_L4( "MaxBytesPerSec");
Skip_L4( "PaddingGranularity");
Get_L4 (Flags, "Flags");
Skip_Flags(Flags, 4, "HasIndex");
Skip_Flags(Flags, 5, "MustUseIndex");
Skip_Flags(Flags, 8, "IsInterleaved");
Skip_Flags(Flags, 9, "UseCKTypeToFindKeyFrames");
Skip_Flags(Flags, 11, "TrustCKType");
Skip_Flags(Flags, 16, "WasCaptureFile");
Skip_Flags(Flags, 17, "Copyrighted");
Get_L4 (avih_TotalFrame, "TotalFrames");
Skip_L4( "InitialFrames");
Skip_L4( "StreamsCount");
Skip_L4( "SuggestedBufferSize");
Skip_L4( "Width");
Skip_L4( "Height");
Skip_L4( "Reserved");
Skip_L4( "Reserved");
Skip_L4( "Reserved");
Skip_L4( "Reserved");
if(Element_Offset<Element_Size)
Skip_XX(Element_Size-Element_Offset, "Unknown");
//Filling
if (MicrosecPerFrame>0)
avih_FrameRate=1000000.0/MicrosecPerFrame;
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_JUNK()
{
Element_Name("Garbage");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_odml()
{
Element_Name("OpenDML");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_odml_dmlh()
{
Element_Name("OpenDML Header");
//Parsing
Get_L4(dmlh_TotalFrame, "GrandFrames");
if (Element_Offset<Element_Size)
Skip_XX(Element_Size-Element_Offset, "Unknown");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_ON2h()
{
Element_Name("On2 header");
//Parsing
Skip_XX(Element_Size, "Unknown");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl()
{
Element_Name("Stream info");
Element_Info1(stream_Count);
//Clean up
StreamKind_Last=Stream_Max;
StreamPos_Last=(size_t)-1;
//Compute the current codec ID
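//Stream_ID stores the two ASCII decimal digits of the stream number in the upper 16 bits,
//so it can be matched against the "##dc"/"##wb" chunk FourCCs found in the movi list and in the indexes.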
Stream_ID=(('0'+stream_Count/10)*0x01000000
+('0'+stream_Count%10)*0x00010000);
stream_Count++;
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_indx()
{
Element_Name("Index");
int32u Entry_Count, ChunkId;
int16u LongsPerEntry;
int8u IndexType, IndexSubType;
Get_L2 (LongsPerEntry, "LongsPerEntry"); //Size of each entry in aIndex array
Get_L1 (IndexSubType, "IndexSubType");
Get_L1 (IndexType, "IndexType");
Get_L4 (Entry_Count, "EntriesInUse"); //Index of first unused member in aIndex array
Get_C4 (ChunkId, "ChunkId"); //FCC of what is indexed
//Dispatch depends on the index type and sub-type
switch (IndexType)
{
case 0x01 : //AVI_INDEX_OF_CHUNKS
switch (IndexSubType)
{
case 0x00 : AVI__hdlr_strl_indx_StandardIndex(Entry_Count, ChunkId); break;
case 0x01 : AVI__hdlr_strl_indx_FieldIndex(Entry_Count, ChunkId); break; //AVI_INDEX_2FIELD
default: Skip_XX(Element_Size-Element_Offset, "Unknown");
}
break;
case 0x0 : //AVI_INDEX_OF_INDEXES
switch (IndexSubType)
{
case 0x00 :
case 0x01 : AVI__hdlr_strl_indx_SuperIndex(Entry_Count, ChunkId); break; //AVI_INDEX_2FIELD
default: Skip_XX(Element_Size-Element_Offset, "Unknown");
}
break;
default: Skip_XX(Element_Size-Element_Offset, "Unknown");
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_indx_StandardIndex(int32u Entry_Count, int32u ChunkId)
{
Element_Name("Standard Index");
//Parsing
int64u BaseOffset, StreamSize=0;
Get_L8 (BaseOffset, "BaseOffset");
Skip_L4( "Reserved3");
for (int32u Pos=0; Pos<Entry_Count; Pos++)
{
//Is too slow
/*
Element_Begin1("Index");
int32u Offset, Size;
Get_L4 (Offset, "Offset"); //BaseOffset + this is absolute file offset
Get_L4 (Size, "Size"); //Bit 31 is set if this is NOT a keyframe
Element_Info1(Size&0x7FFFFFFF);
if (Size)
Element_Info1("KeyFrame");
Element_End0();
*/
//Faster method
if (Element_Offset+8>Element_Size)
break; //Malformed index
int32u Offset=LittleEndian2int32u(Buffer+Buffer_Offset+(size_t)Element_Offset );
int32u Size =LittleEndian2int32u(Buffer+Buffer_Offset+(size_t)Element_Offset+4)&0x7FFFFFFF;
Element_Offset+=8;
//Stream Position and size
if (Pos<300 || MediaInfoLib::Config.ParseSpeed_Get()==1.00)
{
Stream_Structure[BaseOffset+Offset-8].Name=ChunkId&0xFFFF0000;
Stream_Structure[BaseOffset+Offset-8].Size=Size;
}
StreamSize+=(Size&0x7FFFFFFF);
Stream[ChunkId&0xFFFF0000].PacketCount++;
//Interleaved
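//Remember the file position of the first chunk and of a chunk about 1/10th into stream 00 and stream 01;
//these positions are presumably compared later to estimate whether the two streams are interleaved.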
if (Pos== 0 && (ChunkId&0xFFFF0000)==0x30300000 && Interleaved0_1 ==0)
Interleaved0_1 =BaseOffset+Offset-8;
if (Pos==Entry_Count/10 && (ChunkId&0xFFFF0000)==0x30300000 && Interleaved0_10==0)
Interleaved0_10=BaseOffset+Offset-8;
if (Pos== 0 && (ChunkId&0xFFFF0000)==0x30310000 && Interleaved1_1 ==0)
Interleaved1_1 =BaseOffset+Offset-8;
if (Pos==Entry_Count/10 && (ChunkId&0xFFFF0000)==0x30310000 && Interleaved1_10==0)
Interleaved1_10=BaseOffset+Offset-8;
}
Stream[ChunkId&0xFFFF0000].StreamSize+=StreamSize;
if (Element_Offset<Element_Size)
Skip_XX(Element_Size-Element_Offset, "Garbage");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_indx_FieldIndex(int32u Entry_Count, int32u)
{
Element_Name("Field Index");
//Parsing
Skip_L8( "Offset");
Skip_L4( "Reserved2");
for (int32u Pos=0; Pos<Entry_Count; Pos++)
{
Element_Begin1("Index");
Skip_L4( "Offset"); //BaseOffset + this is absolute file offset
Skip_L4( "Size"); //Bit 31 is set if this is NOT a keyframe
Skip_L4( "OffsetField2"); //Offset to second field
Element_End0();
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_indx_SuperIndex(int32u Entry_Count, int32u ChunkId)
{
Element_Name("Index of Indexes");
//Parsing
int64u Offset;
Skip_L4( "Reserved0");
Skip_L4( "Reserved1");
Skip_L4( "Reserved2");
stream& StreamItem = Stream[Stream_ID];
for (int32u Pos=0; Pos<Entry_Count; Pos++)
{
int32u Duration;
Element_Begin1("Index of Indexes");
Get_L8 (Offset, "Offset");
Skip_L4( "Size"); //Size of index chunk at this offset
Get_L4 (Duration, "Duration"); //time span in stream ticks
Index_Pos[Offset]=ChunkId;
StreamItem.indx_Duration+=Duration;
Element_End0();
}
//The old-style idx1 index is no longer needed
NeedOldIndex=false;
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_JUNK()
{
Element_Name("Garbage");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strd()
{
Element_Name("Stream datas");
//Parsing
Skip_XX(Element_Size, "Unknown");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf()
{
Element_Name("Stream format");
//Parse depending of kind of stream
stream& StreamItem = Stream[Stream_ID];
switch (StreamItem.fccType)
{
case Elements::AVI__hdlr_strl_strh_auds : AVI__hdlr_strl_strf_auds(); break;
case Elements::AVI__hdlr_strl_strh_iavs : AVI__hdlr_strl_strf_iavs(); break;
case Elements::AVI__hdlr_strl_strh_mids : AVI__hdlr_strl_strf_mids(); break;
case Elements::AVI__hdlr_strl_strh_txts : AVI__hdlr_strl_strf_txts(); break;
case Elements::AVI__hdlr_strl_strh_vids : AVI__hdlr_strl_strf_vids(); break;
default : Element_Info1("Unknown");
}
//Registering stream
StreamItem.StreamKind=StreamKind_Last;
StreamItem.StreamPos=StreamPos_Last;
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_auds()
{
Element_Info1("Audio");
//Parsing
#if !MEDIAINFO_DEMUX
int32u AvgBytesPerSec;
#endif //!MEDIAINFO_DEMUX
int16u FormatTag, Channels;
BitsPerSample=0;
Get_L2 (FormatTag, "FormatTag");
Get_L2 (Channels, "Channels");
Get_L4 (SamplesPerSec, "SamplesPerSec");
Get_L4 (AvgBytesPerSec, "AvgBytesPerSec");
#if MEDIAINFO_DEMUX
Get_L2 (BlockAlign, "BlockAlign");
#else //MEDIAINFO_DEMUX
Skip_L2( "BlockAlign");
#endif //MEDIAINFO_DEMUX
if (Element_Offset+2<=Element_Size)
Get_L2 (BitsPerSample, "BitsPerSample");
if (FormatTag==1) //Only for PCM
{
//Coherency check
if (BitsPerSample && SamplesPerSec*BitsPerSample*Channels/8==AvgBytesPerSec*8)
AvgBytesPerSec*=8; //Found in one file. TODO: Provide information to end user about such error
//Computing of missing value
if (!BitsPerSample && AvgBytesPerSec && SamplesPerSec && Channels)
BitsPerSample=(int16u)(AvgBytesPerSec*8/SamplesPerSec/Channels);
}
//Filling
Stream_Prepare(Stream_Audio);
stream& StreamItem = Stream[Stream_ID];
StreamItem.Compression=FormatTag;
Ztring Codec; Codec.From_Number(FormatTag, 16);
Codec.MakeUpperCase();
CodecID_Fill(Codec, Stream_Audio, StreamPos_Last, InfoCodecID_Format_Riff);
Fill(Stream_Audio, StreamPos_Last, Audio_Codec, Codec); //May be replaced by codec parser
Fill(Stream_Audio, StreamPos_Last, Audio_Codec_CC, Codec);
if (Channels)
Fill(Stream_Audio, StreamPos_Last, Audio_Channel_s_, (Channels!=5 || FormatTag==0xFFFE)?Channels:6);
if (SamplesPerSec)
Fill(Stream_Audio, StreamPos_Last, Audio_SamplingRate, SamplesPerSec);
if (AvgBytesPerSec)
Fill(Stream_Audio, StreamPos_Last, Audio_BitRate, AvgBytesPerSec*8);
if (BitsPerSample)
Fill(Stream_Audio, StreamPos_Last, Audio_BitDepth, BitsPerSample);
StreamItem.AvgBytesPerSec=AvgBytesPerSec; //Saving bitrate for each stream
if (SamplesPerSec && TimeReference!=(int64u)-1)
{
Fill(Stream_Audio, 0, Audio_Delay, float64_int64s(((float64)TimeReference)*1000/SamplesPerSec));
Fill(Stream_Audio, 0, Audio_Delay_Source, "Container (bext)");
}
//Specific cases
#if defined(MEDIAINFO_DTS_YES) || defined(MEDIAINFO_SMPTEST0337_YES)
if (FormatTag==0x1 && Retrieve(Stream_General, 0, General_Format)==__T("Wave")) //Some DTS or SMPTE ST 337 streams are coded "1"
{
#if defined(MEDIAINFO_DTS_YES)
{
File_Dts* Parser=new File_Dts;
Parser->Frame_Count_Valid=2;
Parser->ShouldContinueParsing=true;
#if MEDIAINFO_DEMUX
if (Config->Demux_Unpacketize_Get() && Retrieve(Stream_General, 0, General_Format)==__T("Wave"))
{
Parser->Demux_Level=2; //Container
Parser->Demux_UnpacketizeContainer=true;
Demux_Level=4; //Intermediate
}
#endif //MEDIAINFO_DEMUX
StreamItem.Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_SMPTEST0337_YES)
{
File_SmpteSt0337* Parser=new File_SmpteSt0337;
Parser->Container_Bits=(int8u)BitsPerSample;
Parser->Aligned=true;
Parser->ShouldContinueParsing=true;
#if MEDIAINFO_DEMUX
if (Config->Demux_Unpacketize_Get() && Retrieve(Stream_General, 0, General_Format)==__T("Wave"))
{
Parser->Demux_Level=2; //Container
Parser->Demux_UnpacketizeContainer=true;
Demux_Level=4; //Intermediate
}
#endif //MEDIAINFO_DEMUX
StreamItem.Parsers.push_back(Parser);
}
#endif
}
#endif
//Creating the parser
if (0);
#if defined(MEDIAINFO_MPEGA_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Audio, InfoCodecID_Format_Riff, Codec)==__T("MPEG Audio"))
{
File_Mpega* Parser=new File_Mpega;
Parser->CalculateDelay=true;
Parser->ShouldContinueParsing=true;
StreamItem.Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_AC3_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Audio, InfoCodecID_Format_Riff, Codec)==__T("AC-3"))
{
File_Ac3* Parser=new File_Ac3;
Parser->Frame_Count_Valid=2;
Parser->CalculateDelay=true;
Parser->ShouldContinueParsing=true;
StreamItem.Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_DTS_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Audio, InfoCodecID_Format_Riff, Codec)==__T("DTS"))
{
File_Dts* Parser=new File_Dts;
Parser->Frame_Count_Valid=2;
Parser->ShouldContinueParsing=true;
StreamItem.Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_AAC_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Audio, InfoCodecID_Format_Riff, Codec)==__T("AAC"))
{
File_Aac* Parser=new File_Aac;
Parser->Mode=File_Aac::Mode_ADTS;
Parser->Frame_Count_Valid=1;
Parser->ShouldContinueParsing=true;
StreamItem.Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_PCM_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Audio, InfoCodecID_Format_Riff, Codec)==__T("PCM"))
{
File_Pcm* Parser=new File_Pcm;
Parser->Codec=Codec;
Parser->Endianness='L';
Parser->BitDepth=(int8u)BitsPerSample;
#if MEDIAINFO_DEMUX
if (Demux_Rate)
Parser->Frame_Count_Valid = float64_int64s(Demux_Rate);
if (Config->Demux_Unpacketize_Get() && Retrieve(Stream_General, 0, General_Format)==__T("Wave"))
{
Parser->Demux_Level=2; //Container
Parser->Demux_UnpacketizeContainer=true;
Demux_Level=4; //Intermediate
}
#else //MEDIAINFO_DEMUX
Parser->Frame_Count_Valid=(int64u)-1; //Disabling it, waiting for SMPTE ST 337 parser reject
#endif //MEDIAINFO_DEMUX
stream& StreamItem = Stream[Stream_ID];
StreamItem.Parsers.push_back(Parser);
StreamItem.IsPcm=true;
}
#endif
#if defined(MEDIAINFO_ADPCM_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Audio, InfoCodecID_Format_Riff, Codec)==__T("ADPCM"))
{
//Creating the parser
File_Adpcm MI;
MI.Codec=Codec;
//Parsing
Open_Buffer_Init(&MI);
Open_Buffer_Continue(&MI, 0);
//Filling
Finish(&MI);
Merge(MI, StreamKind_Last, 0, StreamPos_Last);
}
#endif
#if defined(MEDIAINFO_OGG_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Audio, InfoCodecID_Format_Riff, Codec)==__T("Vorbis")
&& FormatTag!=0x566F) //0x566F has config in this chunk
{
File_Ogg* Parser=new File_Ogg;
Parser->ShouldContinueParsing=true;
StreamItem.Parsers.push_back(Parser);
}
#endif
Open_Buffer_Init_All();
//Options
if (Element_Offset+2>Element_Size)
return; //No options
//Parsing
int16u Option_Size;
Get_L2 (Option_Size, "cbSize");
//Filling
if (Option_Size>0)
{
if (0);
else if (MediaInfoLib::Config.CodecID_Get(Stream_Audio, InfoCodecID_Format_Riff, Codec)==__T("MPEG Audio"))
{
if (Option_Size==12)
AVI__hdlr_strl_strf_auds_Mpega();
else
Skip_XX(Option_Size, "MPEG Audio - Unknown");
}
else if (Codec==__T("AAC") || Codec==__T("FF") || Codec==__T("8180"))
AVI__hdlr_strl_strf_auds_Aac();
else if (FormatTag==0x566F) //Vorbis with Config in this chunk
AVI__hdlr_strl_strf_auds_Vorbis();
else if (FormatTag==0x6750) //Vorbis with Config in this chunk
AVI__hdlr_strl_strf_auds_Vorbis2();
else if (FormatTag==0xFFFE) //Extensible Wave
AVI__hdlr_strl_strf_auds_ExtensibleWave();
else if (Element_Offset+Option_Size<=Element_Size)
Skip_XX(Option_Size, "Unknown");
else if (Element_Offset!=Element_Size)
Skip_XX(Element_Size-Element_Offset, "Error");
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_auds_Mpega()
{
//Parsing
Element_Begin1("MPEG Audio options");
Skip_L2( "ID");
Skip_L4( "Flags");
Skip_L2( "BlockSize");
Skip_L2( "FramesPerBlock");
Skip_L2( "CodecDelay");
Element_End0();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_auds_Aac()
{
//Parsing
Element_Begin1("AAC options");
#if defined(MEDIAINFO_AAC_YES)
File_Aac* MI=new File_Aac();
MI->Mode=File_Aac::Mode_AudioSpecificConfig;
Open_Buffer_Init(MI);
Open_Buffer_Continue(MI);
Finish(MI);
Merge(*MI, StreamKind_Last, 0, StreamPos_Last);
delete MI; //MI=NULL;
#else //MEDIAINFO_MPEG4_YES
Skip_XX(Element_Size-Element_Offset, "(AudioSpecificConfig)");
#endif
Element_End0();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_auds_Vorbis()
{
//Parsing
Element_Begin1("Vorbis options");
#if defined(MEDIAINFO_OGG_YES)
File_Ogg_SubElement MI;
Open_Buffer_Init(&MI);
Element_Begin1("Element sizes");
//All elements parsing, except last one
std::vector<size_t> Elements_Size;
size_t Elements_TotalSize=0;
int8u Elements_Count;
Get_L1(Elements_Count, "Element count");
Elements_Size.resize(Elements_Count+1); //+1 for the last block
for (int8u Pos=0; Pos<Elements_Count; Pos++)
{
int8u Size;
Get_L1(Size, "Size");
Elements_Size[Pos]=Size;
Elements_TotalSize+=Size;
}
Element_End0();
if (Element_Offset+Elements_TotalSize>Element_Size)
return;
//Adding the last block
Elements_Size[Elements_Count]=(size_t)(Element_Size-(Element_Offset+Elements_TotalSize));
Elements_Count++;
//Parsing blocks
for (int8u Pos=0; Pos<Elements_Count; Pos++)
{
Open_Buffer_Continue(&MI, Elements_Size[Pos]);
Open_Buffer_Continue(&MI, 0);
Element_Offset+=Elements_Size[Pos];
}
//Finalizing
Finish(&MI);
Merge(MI, StreamKind_Last, 0, StreamPos_Last);
Clear(Stream_Audio, StreamPos_Last, Audio_BitDepth); //Resolution is not valid for Vorbis
Element_Show();
#else //MEDIAINFO_MPEG4_YES
Skip_XX(Element_Size-Element_Offset, "(Vorbis headers)");
#endif
Element_End0();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_auds_Vorbis2()
{
//Parsing
Skip_XX(8, "Vorbis Unknown");
Element_Begin1("Vorbis options");
#if defined(MEDIAINFO_OGG_YES)
stream& StreamItem = Stream[Stream_ID];
Open_Buffer_Continue(StreamItem.Parsers[0]);
Open_Buffer_Continue(StreamItem.Parsers[0], 0);
Finish(StreamItem.Parsers[0]);
Merge(*StreamItem.Parsers[0], StreamKind_Last, 0, StreamPos_Last);
Element_Show();
#else //MEDIAINFO_MPEG4_YES
Skip_XX(Element_Size-Element_Offset, "(Vorbis headers)");
#endif
Element_End0();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_auds_ExtensibleWave()
{
//Parsing
int128u SubFormat;
int32u ChannelMask;
Skip_L2( "ValidBitsPerSample / SamplesPerBlock");
Get_L4 (ChannelMask, "ChannelMask");
Get_GUID(SubFormat, "SubFormat");
FILLING_BEGIN();
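//WAVE_FORMAT_EXTENSIBLE: when SubFormat matches the standard base GUID xxxxxxxx-0000-0010-8000-00AA00389B71,
//the low 16 bits of its first field carry the legacy wFormatTag, which is used below to pick the codec parser.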
if ((SubFormat.hi&0xFFFFFFFFFFFF0000LL)==0x0010000000000000LL && SubFormat.lo==0x800000AA00389B71LL)
{
CodecID_Fill(Ztring().From_Number((int16u)SubFormat.hi, 16), Stream_Audio, StreamPos_Last, InfoCodecID_Format_Riff);
Fill(Stream_Audio, StreamPos_Last, Audio_CodecID, Ztring().From_GUID(SubFormat), true);
Fill(Stream_Audio, StreamPos_Last, Audio_Codec, MediaInfoLib::Config.Codec_Get(Ztring().From_Number((int16u)SubFormat.hi, 16)), true);
//Creating the parser
if (0);
#if defined(MEDIAINFO_PCM_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Audio, InfoCodecID_Format_Riff, Ztring().From_Number((int16u)SubFormat.hi, 16))==__T("PCM"))
{
//Creating the parser
File_Pcm* Parser=new File_Pcm;
Parser->Codec=Ztring().From_GUID(SubFormat);
Parser->Endianness='L';
Parser->Sign='S';
Parser->BitDepth=(int8u)BitsPerSample;
#if MEDIAINFO_DEMUX
if (Config->Demux_Unpacketize_Get() && Retrieve(Stream_General, 0, General_Format)==__T("Wave"))
{
Parser->Demux_Level=2; //Container
Parser->Demux_UnpacketizeContainer=true;
Demux_Level=4; //Intermediate
}
#endif //MEDIAINFO_DEMUX
stream& StreamItem = Stream[Stream_ID];
StreamItem.Parsers.push_back(Parser);
StreamItem.IsPcm=true;
}
#endif
Open_Buffer_Init_All();
}
else
{
CodecID_Fill(Ztring().From_GUID(SubFormat), Stream_Audio, StreamPos_Last, InfoCodecID_Format_Riff);
}
Fill(Stream_Audio, StreamPos_Last, Audio_ChannelPositions, ExtensibleWave_ChannelMask(ChannelMask));
Fill(Stream_Audio, StreamPos_Last, Audio_ChannelPositions_String2, ExtensibleWave_ChannelMask2(ChannelMask));
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_iavs()
{
//Standard video header before Iavs?
if (Element_Size==72)
{
Element_Begin0();
AVI__hdlr_strl_strf_vids();
Element_End0();
}
Element_Info1("Interleaved Audio/Video");
#ifdef MEDIAINFO_DVDIF_YES
if (Element_Size<8*4)
return;
//Parsing
DV_FromHeader=new File_DvDif();
Open_Buffer_Init(DV_FromHeader);
//DVAAuxSrc
((File_DvDif*)DV_FromHeader)->AuxToAnalyze=0x50; //Audio source
Open_Buffer_Continue(DV_FromHeader, 4);
//DVAAuxCtl
((File_DvDif*)DV_FromHeader)->AuxToAnalyze=0x51; //Audio control
Open_Buffer_Continue(DV_FromHeader, Buffer+Buffer_Offset+(size_t)Element_Offset, 4);
Element_Offset+=4;
//DVAAuxSrc1
Skip_L4( "DVAAuxSrc1");
//DVAAuxCtl1
Skip_L4( "DVAAuxCtl1");
//DVVAuxSrc
((File_DvDif*)DV_FromHeader)->AuxToAnalyze=0x60; //Video source
Open_Buffer_Continue(DV_FromHeader, 4);
//DVAAuxCtl
((File_DvDif*)DV_FromHeader)->AuxToAnalyze=0x61; //Video control
Open_Buffer_Continue(DV_FromHeader, 4);
//Reserved
if (Element_Offset<Element_Size)
{
Skip_L4( "DVReserved");
Skip_L4( "DVReserved");
}
Finish(DV_FromHeader);
Stream_Prepare(Stream_Video);
stream& StreamItem = Stream[Stream_ID];
StreamItem.Parsers.push_back(new File_DvDif);
Open_Buffer_Init(StreamItem.Parsers[0]);
#else //MEDIAINFO_DVDIF_YES
//Parsing
Skip_L4( "DVAAuxSrc");
Skip_L4( "DVAAuxCtl");
Skip_L4( "DVAAuxSrc1");
Skip_L4( "DVAAuxCtl1");
Skip_L4( "DVVAuxSrc");
Skip_L4( "DVVAuxCtl");
Skip_L4( "DVReserved");
Skip_L4( "DVReserved");
//Filling
Ztring Codec; Codec.From_CC4(Stream[Stream_ID].fccHandler);
Stream_Prepare(Stream_Video);
float32 FrameRate=Retrieve(Stream_Video, StreamPos_Last, Video_FrameRate).To_float32();
Fill(Stream_Video, StreamPos_Last, Video_Codec, Codec); //May be replaced by codec parser
Fill(Stream_Video, StreamPos_Last, Video_Codec_CC, Codec);
if (Codec==__T("dvsd")
|| Codec==__T("dvsl"))
{
Fill(Stream_Video, StreamPos_Last, Video_Width, 720);
if (FrameRate==25.000) Fill(Stream_Video, StreamPos_Last, Video_Height, 576);
else if (FrameRate==29.970) Fill(Stream_Video, StreamPos_Last, Video_Height, 480);
Fill(Stream_Video, StreamPos_Last, Video_DisplayAspectRatio, 4.0/3, 3, true);
}
else if (Codec==__T("dvhd"))
{
Fill(Stream_Video, StreamPos_Last, Video_Width, 1440);
if (FrameRate==25.000) Fill(Stream_Video, StreamPos_Last, Video_Height, 1152);
else if (FrameRate==30.000) Fill(Stream_Video, StreamPos_Last, Video_Height, 960);
Fill(Stream_Video, StreamPos_Last, Video_DisplayAspectRatio, 4.0/3, 3, true);
}
Stream_Prepare(Stream_Audio);
CodecID_Fill(Codec, Stream_Audio, StreamPos_Last, InfoCodecID_Format_Riff);
Fill(Stream_Audio, StreamPos_Last, Audio_Codec, Codec); //May be replaced by codec parser
Fill(Stream_Audio, StreamPos_Last, Audio_Codec_CC, Codec);
#endif //MEDIAINFO_DVDIF_YES
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_mids()
{
Element_Info1("Midi");
//Filling
Stream_Prepare(Stream_Audio);
Fill(Stream_Audio, StreamPos_Last, Audio_Format, "MIDI");
Fill(Stream_Audio, StreamPos_Last, Audio_Codec, "Midi");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_txts()
{
Element_Info1("Text");
//Parsing
Ztring Format;
if (Element_Size)
{
Get_Local(10, Format, "Format");
Skip_XX(22, "Unknown");
}
FILLING_BEGIN_PRECISE();
Stream_Prepare(Stream_Text);
if (Element_Size==0)
{
//Creating the parser
stream& StreamItem = Stream[Stream_ID];
#if defined(MEDIAINFO_SUBRIP_YES)
StreamItem.Parsers.push_back(new File_SubRip);
#endif
#if defined(MEDIAINFO_OTHERTEXT_YES)
StreamItem.Parsers.push_back(new File_OtherText); //For SSA
#endif
Open_Buffer_Init_All();
}
else
{
Fill(Stream_Text, StreamPos_Last, Text_Format, Format);
}
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_vids()
{
Element_Info1("Video");
//Parsing
int32u Compression, Width, Height;
int16u Resolution;
Skip_L4( "Size");
Get_L4 (Width, "Width");
Get_L4 (Height, "Height");
Skip_L2( "Planes");
Get_L2 (Resolution, "BitCount"); //Do not use it
Get_C4 (Compression, "Compression");
Skip_L4( "SizeImage");
Skip_L4( "XPelsPerMeter");
Skip_L4( "YPelsPerMeter");
Skip_L4( "ClrUsed");
Skip_L4( "ClrImportant");
//Filling
Stream[Stream_ID].Compression=Compression;
if (Compression==CC4("DXSB"))
{
//Divx.com hack for subtitle, this is a text stream in a DivX Format
Fill(Stream_General, 0, General_Format, "DivX", Unlimited, true, true);
Stream_Prepare(Stream_Text);
}
else
Stream_Prepare(Stream_Video);
//Filling
CodecID_Fill(Ztring().From_CC4(Compression), StreamKind_Last, StreamPos_Last, InfoCodecID_Format_Riff);
Fill(StreamKind_Last, StreamPos_Last, Fill_Parameter(StreamKind_Last, Generic_Codec), Ztring().From_CC4(Compression).To_Local().c_str()); //FormatTag, may be replaced by codec parser
Fill(StreamKind_Last, StreamPos_Last, Fill_Parameter(StreamKind_Last, Generic_Codec_CC), Ztring().From_CC4(Compression).To_Local().c_str()); //FormatTag
Fill(StreamKind_Last, StreamPos_Last, "Width", Width, 10, true);
Fill(StreamKind_Last, StreamPos_Last, "Height", Height>=0x80000000?(-((int32s)Height)):Height, 10, true); // AVI can use negative height for raw to signal that it's coded top-down, not bottom-up
if (Resolution==32 && Compression==0x74736363) //tscc
Fill(StreamKind_Last, StreamPos_Last, "BitDepth", 8);
else if (Compression==0x44495633) //DIV3
Fill(StreamKind_Last, StreamPos_Last, "BitDepth", 8);
else if (MediaInfoLib::Config.CodecID_Get(StreamKind_Last, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression)).find(__T("Canopus"))!=std::string::npos) //Canopus codecs
Fill(StreamKind_Last, StreamPos_Last, "BitDepth", Resolution/3);
else if (Compression==0x44585342) //DXSB
Fill(StreamKind_Last, StreamPos_Last, "BitDepth", Resolution);
else if (MediaInfoLib::Config.CodecID_Get(StreamKind_Last, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression), InfoCodecID_ColorSpace).find(__T("RGBA"))!=std::string::npos) //RGB codecs
Fill(StreamKind_Last, StreamPos_Last, "BitDepth", Resolution/4);
else if (Compression==0x00000000 //RGB
|| MediaInfoLib::Config.CodecID_Get(StreamKind_Last, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression), InfoCodecID_ColorSpace).find(__T("RGB"))!=std::string::npos) //RGB codecs
{
if (Resolution==32)
{
Fill(StreamKind_Last, StreamPos_Last, Fill_Parameter(StreamKind_Last, Generic_Format), "RGBA", Unlimited, true, true);
Fill(StreamKind_Last, StreamPos_Last, "BitDepth", Resolution/4); //With Alpha
}
else
Fill(StreamKind_Last, StreamPos_Last, "BitDepth", Resolution<=16?8:(Resolution/3)); //indexed or normal
}
else if (Compression==0x56503632 //VP62
|| MediaInfoLib::Config.CodecID_Get(StreamKind_Last, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression), InfoCodecID_Format)==__T("H.263") //H.263
|| MediaInfoLib::Config.CodecID_Get(StreamKind_Last, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression), InfoCodecID_Format)==__T("VC-1")) //VC-1
Fill(StreamKind_Last, StreamPos_Last, "BitDepth", Resolution/3);
Stream[Stream_ID].StreamKind=StreamKind_Last;
//Creating the parser
if (0);
#if defined(MEDIAINFO_FFV1_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression), InfoCodecID_Format)==__T("FFV1"))
{
File_Ffv1* Parser=new File_Ffv1;
Parser->Width=Width;
Parser->Height=Height;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_HUFFYUV_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression), InfoCodecID_Format)==__T("HuffYUV"))
{
File_HuffYuv* Parser=new File_HuffYuv;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_MPEGV_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression), InfoCodecID_Format)==__T("MPEG Video"))
{
File_Mpegv* Parser=new File_Mpegv;
Parser->FrameIsAlwaysComplete=true;
Parser->TimeCodeIsNotTrustable=true;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_MPEG4V_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression))==__T("MPEG-4 Visual"))
{
File_Mpeg4v* Parser=new File_Mpeg4v;
Stream[Stream_ID].Specific_IsMpeg4v=true;
Parser->FrameIsAlwaysComplete=true;
if (MediaInfoLib::Config.ParseSpeed_Get()>=0.5)
Parser->ShouldContinueParsing=true;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_PRORES_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression), InfoCodecID_Format)==__T("ProRes"))
{
File_ProRes* Parser=new File_ProRes;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_AVC_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression))==__T("AVC"))
{
File_Avc* Parser=new File_Avc;
Parser->FrameIsAlwaysComplete=true;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_CANOPUS_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression))==__T("Canopus HQ"))
{
File_Canopus* Parser=new File_Canopus;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_JPEG_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression))==__T("JPEG"))
{
File_Jpeg* Parser=new File_Jpeg;
Parser->StreamKind=Stream_Video;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_DVDIF_YES)
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression))==__T("DV"))
{
File_DvDif* Parser=new File_DvDif;
Parser->IgnoreAudio=true;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
#if defined(MEDIAINFO_FRAPS_YES)
else if (Compression==0x46505331) //"FPS1"
{
File_Fraps* Parser=new File_Fraps;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
else if (Compression==0x48465955) //"HFUY"
{
switch (Resolution)
{
case 16 : Fill(Stream_Video, StreamPos_Last, Video_ColorSpace, "YUV"); Fill(Stream_Video, StreamPos_Last, Video_ChromaSubsampling, "4:2:2"); Fill(Stream_Video, StreamPos_Last, Video_BitDepth, 8); break;
case 24 : Fill(Stream_Video, StreamPos_Last, Video_ColorSpace, "RGB"); Fill(Stream_Video, StreamPos_Last, Video_BitDepth, 8); break;
case 32 : Fill(Stream_Video, StreamPos_Last, Video_ColorSpace, "RGBA"); Fill(Stream_Video, StreamPos_Last, Video_BitDepth, 8); break;
default : ;
}
}
#if defined(MEDIAINFO_LAGARITH_YES)
else if (Compression==0x4C414753) //"LAGS"
{
File_Lagarith* Parser=new File_Lagarith;
Stream[Stream_ID].Parsers.push_back(Parser);
}
#endif
Open_Buffer_Init_All();
//Options
if (Element_Offset>=Element_Size)
return; //No options
//Filling
if (0);
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression))==__T("AVC"))
AVI__hdlr_strl_strf_vids_Avc();
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression))==__T("FFV1"))
AVI__hdlr_strl_strf_vids_Ffv1();
else if (MediaInfoLib::Config.CodecID_Get(Stream_Video, InfoCodecID_Format_Riff, Ztring().From_CC4(Compression))==__T("HuffYUV"))
AVI__hdlr_strl_strf_vids_HuffYUV(Resolution, Height);
else Skip_XX(Element_Size-Element_Offset, "Unknown");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_vids_Avc()
{
//Parsing
Element_Begin1("AVC options");
#if defined(MEDIAINFO_AVC_YES)
//Can be sized block or with 000001
stream& StreamItem = Stream[Stream_ID];
File_Avc* Parser=(File_Avc*)StreamItem.Parsers[0];
Parser->MustParse_SPS_PPS=false;
Parser->SizedBlocks=false;
Parser->MustSynchronize=true;
int64u Element_Offset_Save=Element_Offset;
Open_Buffer_Continue(Parser);
if (!Parser->Status[IsAccepted])
{
Element_Offset=Element_Offset_Save;
delete StreamItem.Parsers[0]; StreamItem.Parsers[0]=new File_Avc;
Parser=(File_Avc*)StreamItem.Parsers[0];
Open_Buffer_Init(Parser);
Parser->FrameIsAlwaysComplete=true;
Parser->MustParse_SPS_PPS=true;
Parser->SizedBlocks=true;
Parser->MustSynchronize=false;
Open_Buffer_Continue(Parser);
Element_Show();
}
#else //MEDIAINFO_AVC_YES
Skip_XX(Element_Size-Element_Offset, "(AVC headers)");
#endif
Element_End0();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_vids_Ffv1()
{
//Parsing
Element_Begin1("FFV1 options");
#if defined(MEDIAINFO_FFV1_YES)
File_Ffv1* Parser=(File_Ffv1*)Stream[Stream_ID].Parsers[0];
Open_Buffer_OutOfBand(Parser);
#else //MEDIAINFO_FFV1_YES
Skip_XX(Element_Size-Element_Offset, "(FFV1 headers)");
#endif
Element_End0();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strf_vids_HuffYUV(int16u BitCount, int32u Height)
{
//Parsing
Element_Begin1("HuffYUV options");
#if defined(MEDIAINFO_HUFFYUV_YES)
File_HuffYuv* Parser=(File_HuffYuv*)Stream[Stream_ID].Parsers[0];
Parser->IsOutOfBandData=true;
Parser->BitCount=BitCount;
Parser->Height=Height;
Open_Buffer_Continue(Parser);
#else //MEDIAINFO_HUFFYUV_YES
Skip_XX(Element_Size-Element_Offset, "(HuffYUV headers)");
#endif
Element_End0();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strh()
{
Element_Name("Stream header");
//Parsing
int32u fccType, fccHandler, Scale, Rate, Start, Length;
int16u Left, Top, Right, Bottom;
Get_C4 (fccType, "fccType");
switch (fccType)
{
case Elements::AVI__hdlr_strl_strh_auds :
Get_L4 (fccHandler, "fccHandler");
break;
default:
Get_C4 (fccHandler, "fccHandler");
}
Skip_L4( "Flags");
Skip_L2( "Priority");
Skip_L2( "Language");
Skip_L4( "InitialFrames");
Get_L4 (Scale, "Scale");
Get_L4 (Rate, "Rate"); //Rate/Scale is stream tick rate in ticks/sec
Get_L4 (Start, "Start");
Get_L4 (Length, "Length");
Skip_L4( "SuggestedBufferSize");
Skip_L4( "Quality");
Skip_L4( "SampleSize");
Get_L2 (Left, "Frame_Left");
Get_L2 (Top, "Frame_Top");
Get_L2 (Right, "Frame_Right");
Get_L2 (Bottom, "Frame_Bottom");
if(Element_Offset<Element_Size)
Skip_XX(Element_Size-Element_Offset, "Unknown");
//Filling
float32 FrameRate=0;
if (Rate>0 && Scale>0)
{
//FrameRate (without known value detection)
FrameRate=((float32)Rate)/Scale;
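//Snap near-integer rates (within 0.01) to the integer, otherwise try the N*1000/1001 family so values such as 29.970 or 23.976 come out exactly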
if (FrameRate>1)
{
float32 Rest=FrameRate-(int32u)FrameRate;
if (Rest<0.01)
FrameRate-=Rest;
else if (Rest>0.99)
FrameRate+=1-Rest;
else
{
float32 Rest1001=FrameRate*1001/1000-(int32u)(FrameRate*1001/1000);
if (Rest1001<0.001)
FrameRate=(float32)((int32u)(FrameRate*1001/1000))*1000/1001;
if (Rest1001>0.999)
FrameRate=(float32)((int32u)(FrameRate*1001/1000)+1)*1000/1001;
}
}
//Duration
if (FrameRate)
{
int64u Duration=float32_int64s((1000*(float32)Length)/FrameRate);
if (avih_TotalFrame>0 //avih_TotalFrame is here because some files have a wrong Audio Duration if TotalFrame==0 (which is a bug, of course!)
&& (avih_FrameRate==0 || Duration<((float32)avih_TotalFrame)/avih_FrameRate*1000*1.10) //Some file have a nearly perfect header, except that the value is false, trying to detect it (false if 10% more than 1st video)
&& (avih_FrameRate==0 || Duration>((float32)avih_TotalFrame)/avih_FrameRate*1000*0.90)) //Some file have a nearly perfect header, except that the value is false, trying to detect it (false if 10% less than 1st video)
{
Fill(StreamKind_Last, StreamPos_Last, "Duration", Duration);
}
}
}
switch (fccType)
{
case Elements::AVI__hdlr_strl_strh_vids :
if (FrameRate>0) Fill(Stream_Video, StreamPos_Last, Video_FrameRate, FrameRate, 3);
if (Right-Left>0) Fill(Stream_Video, StreamPos_Last, Video_Width, Right-Left, 10, true);
if (Bottom-Top>0) Fill(Stream_Video, StreamPos_Last, Video_Height, Bottom-Top, 10, true);
break;
case Elements::AVI__hdlr_strl_strh_txts :
if (Right-Left>0) Fill(Stream_Text, StreamPos_Last, Text_Width, Right-Left, 10, true);
if (Bottom-Top>0) Fill(Stream_Text, StreamPos_Last, Text_Height, Bottom-Top, 10, true);
break;
default: ;
}
stream& StreamItem = Stream[Stream_ID];
StreamItem.fccType=fccType;
StreamItem.fccHandler=fccHandler;
StreamItem.Scale=Scale;
StreamItem.Rate=Rate;
StreamItem.Start=Start;
StreamItem.Length=Length;
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_strn()
{
Element_Name("Stream name");
//Parsing
Ztring Title;
Get_Local(Element_Size, Title, "StreamName");
//Filling
Fill(StreamKind_Last, StreamPos_Last, "Title", Title);
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_strl_vprp()
{
Element_Name("Video properties");
//Parsing
int32u FieldPerFrame;
int16u FrameAspectRatio_H, FrameAspectRatio_W;
Skip_L4( "VideoFormatToken");
Skip_L4( "VideoStandard");
Skip_L4( "VerticalRefreshRate");
Skip_L4( "HTotalInT");
Skip_L4( "VTotalInLines");
Get_L2 (FrameAspectRatio_H, "FrameAspectRatio Height");
Get_L2 (FrameAspectRatio_W, "FrameAspectRatio Width");
Skip_L4( "FrameWidthInPixels");
Skip_L4( "FrameHeightInLines");
Get_L4 (FieldPerFrame, "FieldPerFrame");
vector<int32u> VideoYValidStartLines;
for (int32u Pos=0; Pos<FieldPerFrame; Pos++)
{
Element_Begin1("Field");
int32u VideoYValidStartLine;
Skip_L4( "CompressedBMHeight");
Skip_L4( "CompressedBMWidth");
Skip_L4( "ValidBMHeight");
Skip_L4( "ValidBMWidth");
Skip_L4( "ValidBMXOffset");
Skip_L4( "ValidBMYOffset");
Skip_L4( "VideoXOffsetInT");
Get_L4 (VideoYValidStartLine, "VideoYValidStartLine");
VideoYValidStartLines.push_back(VideoYValidStartLine);
Element_End0();
}
if(Element_Offset<Element_Size)
Skip_XX(Element_Size-Element_Offset, "Unknown");
FILLING_BEGIN();
if (FrameAspectRatio_H && FrameAspectRatio_W)
Fill(Stream_Video, 0, Video_DisplayAspectRatio, ((float32)FrameAspectRatio_W)/FrameAspectRatio_H, 3);
switch (FieldPerFrame)
{
case 1 :
Fill(Stream_Video, 0, Video_ScanType, "Progressive");
break;
case 2 :
Fill(Stream_Video, 0, Video_ScanType, "Interlaced");
if (VideoYValidStartLines.size()==2 && VideoYValidStartLines[0]<VideoYValidStartLines[1])
Fill(Stream_Video, 0, Video_ScanOrder, "TFF");
if (VideoYValidStartLines.size()==2 && VideoYValidStartLines[0]>VideoYValidStartLines[1])
Fill(Stream_Video, 0, Video_ScanOrder, "BFF");
default: ;
}
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__hdlr_xxxx()
{
AVI__INFO_xxxx();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__idx1()
{
Element_Name("Index (old)");
//Tests
if (!NeedOldIndex || Idx1_Offset==(int64u)-1)
{
Skip_XX(Element_Size, "Data");
return;
}
//Testing malformed index (index is based on start of the file, wrong)
if (16<=Element_Size && Idx1_Offset+4==LittleEndian2int32u(Buffer+Buffer_Offset+(size_t)Element_Offset+ 8))
Idx1_Offset=0; //Fixing base of movi atom, the index think it is the start of the file
//Parsing
while (Element_Offset+16<=Element_Size)
{
//Is too slow
/*
int32u ChunkID, Offset, Size;
Element_Begin1("Index");
Get_C4 (ChunkID, "ChunkID"); //Bit 31 is set if this is NOT a keyframe
Info_L4(Flags, "Flags");
Skip_Flags(Flags, 0, "NoTime");
Skip_Flags(Flags, 1, "Lastpart");
Skip_Flags(Flags, 2, "Firstpart");
Skip_Flags(Flags, 3, "Midpart");
Skip_Flags(Flags, 4, "KeyFrame");
Get_L4 (Offset, "Offset"); //qwBaseOffset + this is absolute file offset
Get_L4 (Size, "Size"); //Bit 31 is set if this is NOT a keyframe
Element_Info1(Ztring().From_CC4(ChunkID));
Element_Info1(Size);
//Stream Pos and Size
int32u StreamID=(ChunkID&0xFFFF0000);
Stream[StreamID].StreamSize+=Size;
Stream[StreamID].PacketCount++;
Stream_Structure[Idx1_Offset+Offset].Name=StreamID;
Stream_Structure[Idx1_Offset+Offset].Size=Size;
Element_End0();
*/
//Faster method
int32u StreamID=BigEndian2int32u (Buffer+Buffer_Offset+(size_t)Element_Offset )&0xFFFF0000;
int32u Offset =LittleEndian2int32u(Buffer+Buffer_Offset+(size_t)Element_Offset+ 8);
int32u Size =LittleEndian2int32u(Buffer+Buffer_Offset+(size_t)Element_Offset+12);
stream& Stream_Item=Stream[StreamID];
Stream_Item.StreamSize+=Size;
Stream_Item.PacketCount++;
stream_structure& Stream_Structure_Item=Stream_Structure[Idx1_Offset+Offset];
Stream_Structure_Item.Name=StreamID;
Stream_Structure_Item.Size=Size;
Element_Offset+=16;
}
//Interleaved
size_t Pos0=0;
size_t Pos1=0;
for (std::map<int64u, stream_structure>::iterator Temp=Stream_Structure.begin(); Temp!=Stream_Structure.end(); ++Temp)
{
switch (Temp->second.Name)
{
case 0x30300000 :
if (Interleaved0_1==0) Interleaved0_1=Temp->first;
if (Interleaved0_10==0)
{
Pos0++;
if (Pos0>1)
Interleaved0_10=Temp->first;
}
break;
case 0x30310000 :
if (Interleaved1_1==0) Interleaved1_1=Temp->first;
if (Interleaved1_10==0)
{
Pos1++;
if (Pos1>1)
Interleaved1_10=Temp->first;
}
break;
default:;
}
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__INFO()
{
Element_Name("Tags");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__INFO_IID3()
{
Element_Name("ID3 Tag");
//Parsing
#if defined(MEDIAINFO_ID3_YES)
File_Id3 MI;
Open_Buffer_Init(&MI);
Open_Buffer_Continue(&MI);
Finish(&MI);
Merge(MI, Stream_General, 0, 0);
#endif
}
//---------------------------------------------------------------------------
void File_Riff::AVI__INFO_ILYC()
{
Element_Name("Lyrics");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__INFO_IMP3()
{
Element_Name("MP3 Information");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__INFO_JUNK()
{
Element_Name("Garbage");
}
//---------------------------------------------------------------------------
// List of information atoms
// Name X bytes, Pos=0
//
void File_Riff::AVI__INFO_xxxx()
{
//Parsing
Ztring Value;
Get_Local(Element_Size, Value, "Value");
//Filling
stream_t StreamKind=Stream_General;
size_t StreamPos=0;
size_t Parameter=(size_t)-1;
switch (Element_Code)
{
case 0x00000000 : Parameter=General_Comment; break;
case Elements::AVI__INFO_IARL : Parameter=General_Archival_Location; break;
case Elements::AVI__INFO_IART : Parameter=General_Director; break;
case Elements::AVI__INFO_IAS1 : Parameter=Audio_Language; StreamKind=Stream_Audio; StreamPos=0; break;
case Elements::AVI__INFO_IAS2 : Parameter=Audio_Language; StreamKind=Stream_Audio; StreamPos=1; break;
case Elements::AVI__INFO_IAS3 : Parameter=Audio_Language; StreamKind=Stream_Audio; StreamPos=2; break;
case Elements::AVI__INFO_IAS4 : Parameter=Audio_Language; StreamKind=Stream_Audio; StreamPos=3; break;
case Elements::AVI__INFO_IAS5 : Parameter=Audio_Language; StreamKind=Stream_Audio; StreamPos=4; break;
case Elements::AVI__INFO_IAS6 : Parameter=Audio_Language; StreamKind=Stream_Audio; StreamPos=5; break;
case Elements::AVI__INFO_IAS7 : Parameter=Audio_Language; StreamKind=Stream_Audio; StreamPos=6; break;
case Elements::AVI__INFO_IAS8 : Parameter=Audio_Language; StreamKind=Stream_Audio; StreamPos=7; break;
case Elements::AVI__INFO_IAS9 : Parameter=Audio_Language; StreamKind=Stream_Audio; StreamPos=8; break;
case Elements::AVI__INFO_ICDS : Parameter=General_CostumeDesigner; break;
case Elements::AVI__INFO_ICMS : Parameter=General_CommissionedBy; break;
case Elements::AVI__INFO_ICMT : Parameter=General_Comment; break;
case Elements::AVI__INFO_ICNM : Parameter=General_DirectorOfPhotography; break;
case Elements::AVI__INFO_ICNT : Parameter=General_Movie_Country; break;
case Elements::AVI__INFO_ICOP : Parameter=General_Copyright; break;
case Elements::AVI__INFO_ICRD : Parameter=General_Recorded_Date; Value.Date_From_String(Value.To_Local().c_str()); break;
case Elements::AVI__INFO_ICRP : Parameter=General_Cropped; break;
case Elements::AVI__INFO_IDIM : Parameter=General_Dimensions; break;
case Elements::AVI__INFO_IDIT : Parameter=General_Mastered_Date; Value.Date_From_String(Value.To_Local().c_str()); break;
case Elements::AVI__INFO_IDPI : Parameter=General_DotsPerInch; break;
case Elements::AVI__INFO_IDST : Parameter=General_DistributedBy; break;
case Elements::AVI__INFO_IEDT : Parameter=General_EditedBy; break;
case Elements::AVI__INFO_IENG : Parameter=General_EncodedBy; break;
case Elements::AVI__INFO_IGNR : Parameter=General_Genre; break;
case Elements::AVI__INFO_IFRM : Parameter=General_Part_Position_Total; break;
case Elements::AVI__INFO_IKEY : Parameter=General_Keywords; break;
case Elements::AVI__INFO_ILGT : Parameter=General_Lightness; break;
case Elements::AVI__INFO_ILNG : Parameter=Audio_Language; StreamKind=Stream_Audio; break;
case Elements::AVI__INFO_IMED : Parameter=General_OriginalSourceMedium; break;
case Elements::AVI__INFO_IMUS : Parameter=General_MusicBy; break;
case Elements::AVI__INFO_INAM : Parameter=General_Title; break;
case Elements::AVI__INFO_IPDS : Parameter=General_ProductionDesigner; break;
case Elements::AVI__INFO_IPLT : Parameter=General_OriginalSourceForm_NumColors; break;
case Elements::AVI__INFO_IPRD : Parameter=General_OriginalSourceForm_Name; break;
case Elements::AVI__INFO_IPRO : Parameter=General_Producer; break;
case Elements::AVI__INFO_IPRT : Parameter=General_Part_Position; break;
case Elements::AVI__INFO_IRTD : Parameter=General_LawRating; break;
case Elements::AVI__INFO_ISBJ : Parameter=General_Subject; break;
case Elements::AVI__INFO_ISFT : Parameter=General_Encoded_Application; break;
case Elements::AVI__INFO_ISGN : Parameter=General_Genre; break;
case Elements::AVI__INFO_ISHP : Parameter=General_OriginalSourceForm_Sharpness; break;
case Elements::AVI__INFO_ISRC : Parameter=General_OriginalSourceForm_DistributedBy; break;
case Elements::AVI__INFO_ISRF : Parameter=General_OriginalSourceForm; break;
case Elements::AVI__INFO_ISTD : Parameter=General_ProductionStudio; break;
case Elements::AVI__INFO_ISTR : Parameter=General_Performer; break;
case Elements::AVI__INFO_ITCH : Parameter=General_EncodedBy; break;
case Elements::AVI__INFO_IWEB : Parameter=General_Movie_Url; break;
case Elements::AVI__INFO_IWRI : Parameter=General_WrittenBy; break;
default : ;
}
Element_Name(MediaInfoLib::Config.Info_Get(StreamKind, Parameter, Info_Name));
Element_Info1(Value);
switch (Element_Code)
{
case Elements::AVI__INFO_ISMP : INFO_ISMP=Value;
break;
case Elements::AVI__INFO_IGNR :
{
Ztring ISGN=Retrieve(Stream_General, 0, General_Genre);
Clear(Stream_General, 0, General_Genre);
Fill(StreamKind, StreamPos, General_Genre, Value);
if (!ISGN.empty())
Fill(StreamKind, StreamPos, General_Genre, ISGN);
}
break;
default :
if (!Value.empty())
{
if (Parameter!=(size_t)-1)
Fill(StreamKind, StreamPos, Parameter, Value);
else
Fill(StreamKind, StreamPos, Ztring().From_CC4((int32u)Element_Code).To_Local().c_str(), Value, true);
}
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__JUNK()
{
Element_Name("Garbage"); //Library defined size for padding, often used to store library name
if (Element_Size<8)
{
Skip_XX(Element_Size, "Junk");
return;
}
//Detect DivX files
if (CC5(Buffer+Buffer_Offset)==CC5("DivX "))
{
Fill(Stream_General, 0, General_Format, "DivX", Unlimited, true, true);
}
//MPlayer
else if (CC8(Buffer+Buffer_Offset)==CC8("[= MPlay") && Retrieve(Stream_General, 0, General_Encoded_Library).empty())
Fill(Stream_General, 0, General_Encoded_Library, "MPlayer");
//Scenalyzer
else if (CC8(Buffer+Buffer_Offset)==CC8("scenalyz") && Retrieve(Stream_General, 0, General_Encoded_Library).empty())
Fill(Stream_General, 0, General_Encoded_Library, "Scenalyzer");
//FFMpeg broken files detection
else if (CC8(Buffer+Buffer_Offset)==CC8("odmldmlh"))
        dmlh_TotalFrame=0; //It is not normal to have this string in a JUNK block; in the files tested, TotalFrame is broken too
//VirtualDubMod
else if (CC8(Buffer+Buffer_Offset)==CC8("INFOISFT"))
{
int32u Size=LittleEndian2int32u(Buffer+Buffer_Offset+8);
if (Size>Element_Size-12)
Size=(int32u)Element_Size-12;
Fill(Stream_General, 0, General_Encoded_Library, (const char*)(Buffer+Buffer_Offset+12), Size);
}
else if (CC8(Buffer+Buffer_Offset)==CC8("INFOIENG"))
{
int32u Size=LittleEndian2int32u(Buffer+Buffer_Offset+8);
if (Size>Element_Size-12)
Size=(int32u)Element_Size-12;
Fill(Stream_General, 0, General_Encoded_Library, (const char*)(Buffer+Buffer_Offset+12), Size);
}
//Other libraries?
else if (CC1(Buffer+Buffer_Offset)>=CC1("A") && CC1(Buffer+Buffer_Offset)<=CC1("z") && Retrieve(Stream_General, 0, General_Encoded_Library).empty())
Fill(Stream_General, 0, General_Encoded_Library, (const char*)(Buffer+Buffer_Offset), (size_t)Element_Size);
Skip_XX(Element_Size, "Data");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__MD5_()
{
//Parsing
while (Element_Offset<Element_Size)
{
int128u MD5Stored;
Get_L16 (MD5Stored, "MD5");
Ztring MD5_PerItem;
MD5_PerItem.From_Number(MD5Stored, 16);
while (MD5_PerItem.size()<32)
            MD5_PerItem.insert(MD5_PerItem.begin(), '0'); //Padding with 0, this must be a 32-character string
MD5_PerItem.MakeLowerCase();
MD5s.push_back(MD5_PerItem);
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__movi()
{
Element_Name("Datas");
//Only the first time, no need in AVIX
if (movi_Size==0)
{
Idx1_Offset=File_Offset+Buffer_Offset-4;
        BookMark_Set(); //Remembering this place, for stream parsing in phase 2
//For each stream
std::map<int32u, stream>::iterator Temp=Stream.begin();
while (Temp!=Stream.end())
{
if ((Temp->second.Parsers.empty() || Temp->second.Parsers[0]==NULL) && Temp->second.fccType!=Elements::AVI__hdlr_strl_strh_txts)
{
Temp->second.SearchingPayload=false;
stream_Count--;
}
++Temp;
}
}
    //Probing rec (with index, this is not always tested in the flow)
if (Element_Size<12)
{
Element_WaitForMoreData();
return;
}
if (CC4(Buffer+Buffer_Offset+8)==0x72656320) //"rec "
rec__Present=true;
//Filling
if (!SecondPass)
movi_Size+=Element_TotalSize_Get();
//We must parse moov?
if (NeedOldIndex || (stream_Count==0 && Index_Pos.empty()))
{
//Jumping
#if MEDIAINFO_TRACE
if (Trace_Activated)
Param("Data", Ztring("(")+Ztring::ToZtring(Element_TotalSize_Get())+Ztring(" bytes)"));
#endif //MEDIAINFO_TRACE
Element_Offset=Element_TotalSize_Get(); //Not using Skip_XX() because we want to skip data we don't have, and Skip_XX() does a test on size of buffer
return;
}
//Jump to next useful data
AVI__movi_StreamJump();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__movi_rec_()
{
Element_Name("Syncronisation");
rec__Present=true;
}
//---------------------------------------------------------------------------
void File_Riff::AVI__movi_rec__xxxx()
{
AVI__movi_xxxx();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__movi_xxxx()
{
if (Element_Code==Elements::AVI__JUNK)
{
Skip_XX(Element_Size, "Junk");
AVI__movi_StreamJump();
return;
}
if (Element_Code!=(int64u)-1)
Stream_ID=(int32u)(Element_Code&0xFFFF0000);
else
Stream_ID=(int32u)-1;
if (Stream_ID==0x69780000) //ix..
{
//AVI Standard Index Chunk
AVI__hdlr_strl_indx();
Stream_ID=(int32u)(Element_Code&0x0000FFFF)<<16;
AVI__movi_StreamJump();
return;
}
if ((Element_Code&0x0000FFFF)==0x00006978) //..ix (Out of specs, but found in a Adobe After Effects CS4 DV file
{
//AVI Standard Index Chunk
AVI__hdlr_strl_indx();
Stream_ID=(int32u)(Element_Code&0xFFFF0000);
AVI__movi_StreamJump();
return;
}
stream& StreamItem = Stream[Stream_ID];
#if MEDIAINFO_DEMUX
if (StreamItem.Rate) //AVI
{
int64u Element_Code_Old=Element_Code;
Element_Code=((Element_Code_Old>>24)&0xF)*10+((Element_Code_Old>>16)&0xF);
Frame_Count_NotParsedIncluded= StreamItem.PacketPos;
FrameInfo.DTS=Frame_Count_NotParsedIncluded*1000000000* StreamItem.Scale/ StreamItem.Rate;
Demux(Buffer+Buffer_Offset, (size_t)Element_Size, ContentType_MainStream);
Element_Code=Element_Code_Old;
Frame_Count_NotParsedIncluded=(int64u)-1;
}
else //WAV
{
//TODO
}
#endif //MEDIAINFO_DEMUX
StreamItem.PacketPos++;
//Finished?
if (!StreamItem.SearchingPayload)
{
Element_DoNotShow();
AVI__movi_StreamJump();
return;
}
#if MEDIAINFO_TRACE
if (Config_Trace_Level)
{
switch (Element_Code&0x0000FFFF) //2 last bytes
{
case Elements::AVI__movi_xxxx_____ : Element_Info1("DV"); break;
case Elements::AVI__movi_xxxx___db :
case Elements::AVI__movi_xxxx___dc : Element_Info1("Video"); break;
case Elements::AVI__movi_xxxx___sb :
case Elements::AVI__movi_xxxx___tx : Element_Info1("Text"); break;
case Elements::AVI__movi_xxxx___wb : Element_Info1("Audio"); break;
default : Element_Info1("Unknown"); break;
}
Element_Info1(Stream[Stream_ID].PacketPos);
}
#endif //MEDIAINFO_TRACE
//Some specific stuff
switch (Element_Code&0x0000FFFF) //2 last bytes
{
case Elements::AVI__movi_xxxx___tx : AVI__movi_xxxx___tx(); break;
default : ;
}
//Parsing
for (size_t Pos=0; Pos<StreamItem.Parsers.size(); Pos++)
if (StreamItem.Parsers[Pos])
{
if (FrameInfo.PTS!=(int64u)-1)
StreamItem.Parsers[Pos]->FrameInfo.PTS=FrameInfo.PTS;
if (FrameInfo.DTS!=(int64u)-1)
StreamItem.Parsers[Pos]->FrameInfo.DTS=FrameInfo.DTS;
Open_Buffer_Continue(StreamItem.Parsers[Pos], Buffer+Buffer_Offset+(size_t)Element_Offset, (size_t)(Element_Size-Element_Offset));
Element_Show();
if (StreamItem.Parsers.size()==1 && StreamItem.Parsers[Pos]->Buffer_Size>0)
StreamItem.ChunksAreComplete=false;
if (StreamItem.Parsers.size()>1)
{
if (!StreamItem.Parsers[Pos]->Status[IsAccepted] && StreamItem.Parsers[Pos]->Status[IsFinished])
{
delete *(StreamItem.Parsers.begin()+Pos);
StreamItem.Parsers.erase(StreamItem.Parsers.begin()+Pos);
Pos--;
}
else if (StreamItem.Parsers.size()>1 && StreamItem.Parsers[Pos]->Status[IsAccepted])
{
File__Analyze* Parser= StreamItem.Parsers[Pos];
for (size_t Pos2=0; Pos2<StreamItem.Parsers.size(); Pos2++)
{
if (Pos2!=Pos)
delete *(StreamItem.Parsers.begin()+Pos2);
}
StreamItem.Parsers.clear();
StreamItem.Parsers.push_back(Parser);
Pos=0;
}
}
#if MEDIAINFO_DEMUX
if (Config->Demux_EventWasSent)
{
Demux_Parser= StreamItem.Parsers[Pos];
return;
}
#endif //MEDIAINFO_DEMUX
}
Element_Offset=Element_Size;
//Some specific stuff
switch (Element_Code&0x0000FFFF) //2 last bytes
{
case Elements::AVI__movi_xxxx_____ :
case Elements::AVI__movi_xxxx___db :
case Elements::AVI__movi_xxxx___dc : AVI__movi_xxxx___dc(); break;
case Elements::AVI__movi_xxxx___wb : AVI__movi_xxxx___wb(); break;
default : ;
}
//We must always parse moov?
AVI__movi_StreamJump();
Element_Show();
}
//---------------------------------------------------------------------------
void File_Riff::AVI__movi_xxxx___dc()
{
//Finish (if requested)
stream& StreamItem = Stream[Stream_ID];
if (StreamItem.Parsers.empty()
|| StreamItem.Parsers[0]->Status[IsFinished]
|| (StreamItem.PacketPos>=300 && MediaInfoLib::Config.ParseSpeed_Get()<1.00))
{
StreamItem.SearchingPayload=false;
stream_Count--;
return;
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__movi_xxxx___tx()
{
//Parsing
int32u Name_Size;
Ztring Value;
int32u GAB2;
Peek_B4(GAB2);
if (GAB2==0x47414232 && Element_Size>=17)
{
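        //GAB2 header: used when subtitle data (e.g. VobSub/SSA) is embedded in an AVI text ('tx') stream.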
Skip_C4( "GAB2");
Skip_L1( "Zero");
Skip_L2( "CodePage"); //2=Unicode
Get_L4 (Name_Size, "Name_Size");
Skip_UTF16L(Name_Size, "Name");
Skip_L2( "Four");
Skip_L4( "File_Size");
if (Element_Offset>Element_Size)
Element_Offset=Element_Size; //Problem
}
//Skip it
Stream[Stream_ID].SearchingPayload=false;
stream_Count--;
}
//---------------------------------------------------------------------------
void File_Riff::AVI__movi_xxxx___wb()
{
//Finish (if requested)
stream& StreamItem = Stream[Stream_ID];
    if (StreamItem.PacketPos>=4 //For having the chunk alignment
&& (StreamItem.Parsers.empty()
|| StreamItem.Parsers[0]->Status[IsFinished]
|| (StreamItem.PacketPos>=300 && MediaInfoLib::Config.ParseSpeed_Get()<1.00)))
{
StreamItem.SearchingPayload=false;
stream_Count--;
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__movi_StreamJump()
{
//Jump to next useful data
if (!Index_Pos.empty())
{
if (Index_Pos.begin()->first<=File_Offset+Buffer_Offset && Element_Code!=Elements::AVI__movi)
Index_Pos.erase(Index_Pos.begin());
int64u ToJump=File_Size;
if (!Index_Pos.empty())
ToJump=Index_Pos.begin()->first;
if (ToJump>File_Size)
ToJump=File_Size;
if (ToJump>=File_Offset+Buffer_Offset+Element_TotalSize_Get(Element_Level-2)) //We want always Element movi
{
#if MEDIAINFO_HASH
if (Config->File_Hash_Get().to_ulong() && SecondPass)
Hash_ParseUpTo=File_Offset+Buffer_Offset+Element_TotalSize_Get(Element_Level-2);
else
#endif //MEDIAINFO_HASH
GoTo(File_Offset+Buffer_Offset+Element_TotalSize_Get(Element_Level-2), "AVI"); //Not in this chunk
}
else if (ToJump!=File_Offset+Buffer_Offset+(Element_Code==Elements::AVI__movi?0:Element_Size))
{
#if MEDIAINFO_HASH
if (Config->File_Hash_Get().to_ulong() && SecondPass)
Hash_ParseUpTo=File_Offset+Buffer_Offset+Element_TotalSize_Get(Element_Level-2);
else
#endif //MEDIAINFO_HASH
GoTo(ToJump, "AVI"); //Not just after
}
}
else if (stream_Count==0)
{
//Jumping
Element_Show();
if (rec__Present)
Element_End0();
Info("movi, Jumping to end of chunk");
if (SecondPass)
{
std::map<int32u, stream>::iterator Temp=Stream.begin();
while (Temp!=Stream.end())
{
for (size_t Pos=0; Pos<Temp->second.Parsers.size(); ++Pos)
{
Temp->second.Parsers[Pos]->Fill();
Temp->second.Parsers[Pos]->Open_Buffer_Unsynch();
}
++Temp;
}
Finish("AVI"); //The rest is already parsed
}
else
GoTo(File_Offset+Buffer_Offset+Element_TotalSize_Get(), "AVI");
}
else if (Stream_Structure_Temp!=Stream_Structure.end())
{
do
Stream_Structure_Temp++;
while (Stream_Structure_Temp!=Stream_Structure.end() && !(Stream[(int32u)Stream_Structure_Temp->second.Name].SearchingPayload && Config->ParseSpeed<1.0));
if (Stream_Structure_Temp!=Stream_Structure.end())
{
int64u ToJump=Stream_Structure_Temp->first;
if (ToJump>=File_Offset+Buffer_Offset+Element_TotalSize_Get(Element_Level-2))
{
#if MEDIAINFO_HASH
if (Config->File_Hash_Get().to_ulong() && SecondPass)
Hash_ParseUpTo=File_Offset+Buffer_Offset+Element_TotalSize_Get(Element_Level-2);
else
#endif //MEDIAINFO_HASH
GoTo(File_Offset+Buffer_Offset+Element_TotalSize_Get(Element_Level-2), "AVI"); //Not in this chunk
}
else if (ToJump!=File_Offset+Buffer_Offset+Element_Size)
{
#if MEDIAINFO_HASH
if (Config->File_Hash_Get().to_ulong() && SecondPass)
Hash_ParseUpTo=ToJump;
else
#endif //MEDIAINFO_HASH
GoTo(ToJump, "AVI"); //Not just after
}
}
else
Finish("AVI");
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__PrmA()
{
Element_Name("Adobe Premiere PrmA");
//Parsing
int32u FourCC, Size;
Get_C4 (FourCC, "FourCC");
Get_B4 (Size, "Size");
switch (FourCC)
{
case 0x50415266:
if (Size==20)
{
int32u PAR_X, PAR_Y;
Skip_B4( "Unknown");
Get_B4 (PAR_X, "PAR_X");
Get_B4 (PAR_Y, "PAR_Y");
if (PAR_Y)
PAR=((float64)PAR_X)/PAR_Y;
}
else
Skip_XX(Element_Size-Element_Offset, "Unknown");
break;
default:
for (int32u Pos=8; Pos<Size; Pos++)
Skip_B4( "Unknown");
}
}
//---------------------------------------------------------------------------
void File_Riff::AVI__Tdat()
{
Element_Name("Adobe Premiere Tdat");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__Tdat_tc_A()
{
Element_Name("tc_A");
//Parsing
Ztring Value;
Get_Local(Element_Size, Value, "Unknown");
if (Value.find_first_not_of(__T("0123456789:;"))==string::npos)
Tdat_tc_A=Value;
}
//---------------------------------------------------------------------------
void File_Riff::AVI__Tdat_tc_O()
{
Element_Name("tc_O");
//Parsing
Ztring Value;
Get_Local(Element_Size, Value, "Unknown");
if (Value.find_first_not_of(__T("0123456789:;"))==string::npos)
Tdat_tc_O=Value;
}
//---------------------------------------------------------------------------
void File_Riff::AVI__Tdat_rn_A()
{
Element_Name("rn_A");
//Parsing
Skip_Local(Element_Size, "Unknown");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__Tdat_rn_O()
{
Element_Name("rn_O");
//Parsing
Skip_Local(Element_Size, "Unknown");
}
//---------------------------------------------------------------------------
void File_Riff::AVI__xxxx()
{
Stream_ID=(int32u)(Element_Code&0xFFFF0000);
if (Stream_ID==0x69780000) //ix..
{
//AVI Standard Index Chunk
AVI__hdlr_strl_indx();
Stream_ID=(int32u)(Element_Code&0x0000FFFF)<<16;
AVI__movi_StreamJump();
return;
}
if ((Element_Code&0x0000FFFF)==0x00006978) //..ix (Out of specs, but found in a Adobe After Effects CS4 DV file
{
//AVI Standard Index Chunk
AVI__hdlr_strl_indx();
Stream_ID=(int32u)(Element_Code&0xFFFF0000);
AVI__movi_StreamJump();
return;
}
}
//---------------------------------------------------------------------------
void File_Riff::AVIX()
{
//Filling
Fill(Stream_General, 0, General_Format_Profile, "OpenDML", Unlimited, true, true);
}
//---------------------------------------------------------------------------
void File_Riff::AVIX_idx1()
{
AVI__idx1();
}
//---------------------------------------------------------------------------
void File_Riff::AVIX_movi()
{
AVI__movi();
}
//---------------------------------------------------------------------------
void File_Riff::AVIX_movi_rec_()
{
AVI__movi_rec_();
}
//---------------------------------------------------------------------------
void File_Riff::AVIX_movi_rec__xxxx()
{
AVIX_movi_xxxx();
}
//---------------------------------------------------------------------------
void File_Riff::AVIX_movi_xxxx()
{
AVI__movi_xxxx();
}
//---------------------------------------------------------------------------
void File_Riff::CADP()
{
Element_Name("CMP4 - ADPCM");
//Testing if we have enough data
if (Element_Size<4)
{
Element_WaitForMoreData();
return;
}
//Parsing
int32u Codec;
Get_C4 (Codec, "Codec");
#if MEDIAINFO_TRACE
if (Trace_Activated)
Param("Data", Ztring("(")+Ztring::ToZtring(Element_TotalSize_Get()-Element_Offset)+Ztring(" bytes)"));
#endif //MEDIAINFO_TRACE
Element_Offset=Element_TotalSize_Get(); //Not using Skip_XX() because we want to skip data we don't have, and Skip_XX() does a test on size of buffer
FILLING_BEGIN();
Stream_Prepare(Stream_Audio);
if (Codec==0x41647063) //Adpc
Fill(Stream_Audio, StreamPos_Last, Audio_Format, "ADPCM");
Fill(Stream_Audio, StreamPos_Last, Audio_StreamSize, Element_TotalSize_Get());
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::CDDA()
{
Element_Name("Compact Disc for Digital Audio");
//Filling
Accept("CDDA");
}
//---------------------------------------------------------------------------
void File_Riff::CDDA_fmt_()
{
//Specs: http://fr.wikipedia.org/wiki/Compact_Disc_Audio_track
//Specs: http://www.moon-soft.com/program/FORMAT/sound/cda.htm
Element_Name("Stream format");
//Parsing
int32u id;
int16u Version, tracknb=1;
int8u TPositionF=0, TPositionS=0, TPositionM=0, TDurationF=0, TDurationS=0, TDurationM=0;
Get_L2 (Version, "Version"); // Always 1
if (Version!=1)
{
//Not supported
Skip_XX(Element_Size-2, "Data");
return;
}
Get_L2 (tracknb, "Number"); // Start at 1
Get_L4 (id, "id");
Skip_L4( "offset"); // in frames //Priority of FSM format
Skip_L4( "Duration"); // in frames //Priority of FSM format
Get_L1 (TPositionF, "Track_PositionF"); // in frames
Get_L1 (TPositionS, "Track_PositionS"); // in seconds
Get_L1 (TPositionM, "Track_PositionM"); // in minutes
Skip_L1( "empty");
Get_L1 (TDurationF, "Track_DurationF"); // in frames
Get_L1 (TDurationS, "Track_DurationS"); // in seconds
Get_L1 (TDurationM, "Track_DurationM"); // in minutes
Skip_L1( "empty");
FILLING_BEGIN();
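        //CD audio timing: 75 frames per second, 2352 bytes of PCM per frame (44.1 kHz, 16-bit, stereo = 1411200 bit/s).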
int32u TPosition=TPositionM*60*75+TPositionS*75+TPositionF;
int32u TDuration=TDurationM*60*75+TDurationS*75+TDurationF;
Fill(Stream_General, 0, General_Track_Position, tracknb);
Fill(Stream_General, 0, General_Format, "CDDA");
Fill(Stream_General, 0, General_Format_Info, "Compact Disc for Digital Audio");
Fill(Stream_General, 0, General_UniqueID, id);
Fill(Stream_General, 0, General_FileSize, File_Size+TDuration*2352, 10, true);
Stream_Prepare(Stream_Audio);
Fill(Stream_Audio, 0, Audio_Format, "PCM");
Fill(Stream_Audio, 0, Audio_Format_Settings_Endianness, "Little");
Fill(Stream_Audio, 0, Audio_BitDepth, 16);
Fill(Stream_Audio, 0, Audio_Channel_s_, 2);
Fill(Stream_Audio, 0, Audio_SamplingRate, 44100);
Fill(Stream_Audio, 0, Audio_FrameRate, (float)75);
Fill(Stream_Audio, 0, Audio_BitRate, 1411200);
Fill(Stream_Audio, 0, Audio_Compression_Mode, "Lossless");
Fill(Stream_Audio, 0, Audio_FrameCount, TDuration);
Fill(Stream_Audio, 0, Audio_Duration, float32_int32s(((float32)TDuration)*1000/75));
Fill(Stream_Audio, 0, Audio_Delay, float32_int32s(((float32)TPosition)*1000/75));
//No more need data
Finish("CDDA");
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::CMJP()
{
Element_Name("CMP4 - JPEG");
//Parsing
#ifdef MEDIAINFO_JPEG_YES
Stream_ID=0;
File_Jpeg* Parser=new File_Jpeg;
Open_Buffer_Init(Parser);
Parser->StreamKind=Stream_Video;
Open_Buffer_Continue(Parser);
Element_Offset=Element_TotalSize_Get(); //Not using Skip_XX() because we want to skip data we don't have, and Skip_XX() does a test on size of buffer
FILLING_BEGIN();
Stream_Prepare(Stream_Video);
Fill(Stream_Video, StreamPos_Last, Video_StreamSize, Element_TotalSize_Get());
Finish(Parser);
Merge(*Parser, StreamKind_Last, 0, StreamPos_Last);
FILLING_END();
Stream[Stream_ID].Parsers.push_back(Parser);
#else
Element_Offset=Element_TotalSize_Get(); //Not using Skip_XX() because we want to skip data we don't have, and Skip_XX() does a test on size of buffer
FILLING_BEGIN();
Stream_Prepare(Stream_Video);
Fill(Stream_Video, StreamPos_Last, Video_Format, "JPEG");
Fill(Stream_Video, StreamPos_Last, Video_StreamSize, Element_TotalSize_Get());
FILLING_END();
#endif
}
//---------------------------------------------------------------------------
void File_Riff::CMP4()
{
Accept("CMP4");
Element_Name("CMP4 - Header");
//Parsing
Ztring Title;
Get_Local(Element_Size, Title, "Title");
FILLING_BEGIN();
Fill(Stream_General, 0, General_Format, "CMP4");
Fill(Stream_General, 0, "Title", Title);
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::IDVX()
{
Element_Name("Tags");
}
//---------------------------------------------------------------------------
void File_Riff::INDX()
{
Element_Name("Index (from which spec?)");
}
//---------------------------------------------------------------------------
void File_Riff::INDX_xxxx()
{
Stream_ID=(int32u)(Element_Code&0xFFFF0000);
if (Stream_ID==0x69780000) //ix..
{
//Index
int32u Entry_Count, ChunkId;
int16u LongsPerEntry;
int8u IndexType, IndexSubType;
Get_L2 (LongsPerEntry, "LongsPerEntry"); //Size of each entry in aIndex array
Get_L1 (IndexSubType, "IndexSubType");
Get_L1 (IndexType, "IndexType");
Get_L4 (Entry_Count, "EntriesInUse"); //Index of first unused member in aIndex array
Get_C4 (ChunkId, "ChunkId"); //FCC of what is indexed
Skip_L4( "Unknown");
Skip_L4( "Unknown");
Skip_L4( "Unknown");
for (int32u Pos=0; Pos<Entry_Count; Pos++)
{
Skip_L8( "Offset");
Skip_L4( "Size");
Skip_L4( "Frame number?");
Skip_L4( "Frame number?");
Skip_L4( "Zero");
}
}
//Currently, we do not use the index
//TODO: use the index
Stream_Structure.clear();
}
//---------------------------------------------------------------------------
void File_Riff::JUNK()
{
Element_Name("Junk");
//Parse
#if MEDIAINFO_TRACE
if (Trace_Activated)
Param("Junk", Ztring("(")+Ztring::ToZtring(Element_TotalSize_Get())+Ztring(" bytes)"));
#endif //MEDIAINFO_TRACE
Element_Offset=Element_TotalSize_Get(); //Not using Skip_XX() because we want to skip data we don't have, and Skip_XX() does a test on size of buffer
}
//---------------------------------------------------------------------------
void File_Riff::menu()
{
Element_Name("DivX Menu");
//Filling
Stream_Prepare(Stream_Menu);
Fill(Stream_Menu, StreamPos_Last, Menu_Format, "DivX Menu");
Fill(Stream_Menu, StreamPos_Last, Menu_Codec, "DivX");
}
//---------------------------------------------------------------------------
void File_Riff::MThd()
{
Element_Name("MIDI header");
//Parsing
Skip_B2( "format");
Skip_B2( "ntrks");
Skip_B2( "division");
FILLING_BEGIN_PRECISE();
Accept("MIDI");
Fill(Stream_General, 0, General_Format, "MIDI");
FILLING_ELSE();
Reject("MIDI");
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::MTrk()
{
Element_Name("MIDI Track");
//Parsing
#if MEDIAINFO_TRACE
if (Trace_Activated)
Param("Data", Ztring("(")+Ztring::ToZtring(Element_TotalSize_Get())+Ztring(" bytes)"));
#endif //MEDIAINFO_TRACE
Element_Offset=Element_TotalSize_Get(); //Not using Skip_XX() because we want to skip data we don't have, and Skip_XX() does a test on size of buffer
FILLING_BEGIN();
Stream_Prepare(Stream_Audio);
Fill(Stream_Audio, StreamPos_Last, Audio_Format, "MIDI");
Fill(Stream_Audio, StreamPos_Last, Audio_Codec, "Midi");
Finish("MIDI");
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::PAL_()
{
Data_Accept("RIFF Palette");
Element_Name("RIFF Palette");
//Filling
Fill(Stream_General, 0, General_Format, "RIFF Palette");
}
//---------------------------------------------------------------------------
void File_Riff::QLCM()
{
Data_Accept("QLCM");
Element_Name("QLCM");
//Filling
Fill(Stream_General, 0, General_Format, "QLCM");
}
//---------------------------------------------------------------------------
void File_Riff::QLCM_fmt_()
{
//Parsing
Ztring codec_name;
int128u codec_guid;
int32u num_rates;
int16u codec_version, average_bps, packet_size, block_size, sampling_rate, sample_size;
int8u major, minor;
Get_L1 (major, "major");
Get_L1 (minor, "minor");
Get_GUID(codec_guid, "codec-guid");
Get_L2 (codec_version, "codec-version");
Get_Local(80, codec_name, "codec-name");
Get_L2 (average_bps, "average-bps");
Get_L2 (packet_size, "packet-size");
Get_L2 (block_size, "block-size");
Get_L2 (sampling_rate, "sampling-rate");
Get_L2 (sample_size, "sample-size");
Element_Begin1("rate-map-table");
Get_L4 (num_rates, "num-rates");
for (int32u rate=0; rate<num_rates; rate++)
{
Skip_L2( "rate-size");
Skip_L2( "rate-octet");
}
Element_End0();
Skip_L4( "Reserved");
Skip_L4( "Reserved");
Skip_L4( "Reserved");
Skip_L4( "Reserved");
if (Element_Offset<Element_Size)
Skip_L4( "Reserved"); //Some files don't have the 5th reserved dword
FILLING_BEGIN_PRECISE();
Stream_Prepare (Stream_Audio);
switch (codec_guid.hi)
{
case Elements::QLCM_QCELP1 :
case Elements::QLCM_QCELP2 : Fill(Stream_Audio, 0, Audio_Format, "QCELP"); Fill(Stream_Audio, 0, Audio_Codec, "QCELP"); break;
case Elements::QLCM_EVRC : Fill(Stream_Audio, 0, Audio_Format, "EVRC"); Fill(Stream_Audio, 0, Audio_Codec, "EVRC"); break;
case Elements::QLCM_SMV : Fill(Stream_Audio, 0, Audio_Format, "SMV"); Fill(Stream_Audio, 0, Audio_Codec, "SMV"); break;
default : ;
}
Fill(Stream_Audio, 0, Audio_BitRate, average_bps);
Fill(Stream_Audio, 0, Audio_SamplingRate, sampling_rate);
Fill(Stream_Audio, 0, Audio_BitDepth, sample_size);
Fill(Stream_Audio, 0, Audio_Channel_s_, 1);
FILLING_END();
}
#if defined(MEDIAINFO_GXF_YES)
//---------------------------------------------------------------------------
void File_Riff::rcrd()
{
Data_Accept("Ancillary media packets");
Element_Name("Ancillary media packets");
//Filling
if (Retrieve(Stream_General, 0, General_Format).empty())
Fill(Stream_General, 0, General_Format, "Ancillary media packets"); //GXF, RDD14-2007
//Clearing old data
if (Ancillary)
{
(*Ancillary)->FrameInfo.DTS=FrameInfo.DTS;
Open_Buffer_Continue(*Ancillary, Buffer, 0);
}
}
//---------------------------------------------------------------------------
void File_Riff::rcrd_desc()
{
Element_Name("Ancillary media packet description");
//Parsing
int32u Version;
Get_L4 (Version, "Version");
if (Version==2)
{
Skip_L4( "Number of fields");
Skip_L4( "Length of the ancillary data field descriptions");
Skip_L4( "Byte size of the complete ancillary media packet");
Skip_L4( "Format of the video");
}
else
Skip_XX(Element_Size-Element_Offset, "Unknown");
}
//---------------------------------------------------------------------------
void File_Riff::rcrd_fld_()
{
Element_Name("Ancillary data field description");
}
//---------------------------------------------------------------------------
void File_Riff::rcrd_fld__anc_()
{
Element_Name("Ancillary data sample description");
rcrd_fld__anc__pos__LineNumber=(int32u)-1;
}
//---------------------------------------------------------------------------
void File_Riff::rcrd_fld__anc__pos_()
{
Element_Name("Ancillary data sample description");
//Parsing
Get_L4 (rcrd_fld__anc__pos__LineNumber, "Video line number");
Skip_L4( "Ancillary video color difference or luma space");
Skip_L4( "Ancillary video space");
}
//---------------------------------------------------------------------------
void File_Riff::rcrd_fld__anc__pyld()
{
Element_Name("Ancillary data sample payload");
if (Ancillary)
{
(*Ancillary)->FrameInfo.DTS=FrameInfo.DTS;
(*Ancillary)->LineNumber=rcrd_fld__anc__pos__LineNumber;
Open_Buffer_Continue(*Ancillary);
}
}
//---------------------------------------------------------------------------
void File_Riff::rcrd_fld__finf()
{
Element_Name("Data field description");
//Parsing
Skip_L4( "Video field identifier");
}
#endif //MEDIAINFO_GXF_YES
//---------------------------------------------------------------------------
void File_Riff::RDIB()
{
Data_Accept("RIFF DIB");
Element_Name("RIFF DIB");
//Filling
Fill(Stream_General, 0, General_Format, "RIFF DIB");
}
//---------------------------------------------------------------------------
void File_Riff::RMID()
{
Data_Accept("RIFF MIDI");
Element_Name("RIFF MIDI");
//Filling
Fill(Stream_General, 0, General_Format, "RIFF MIDI");
}
//---------------------------------------------------------------------------
void File_Riff::RMMP()
{
Data_Accept("RIFF MMP");
Element_Name("RIFF MMP");
//Filling
Fill(Stream_General, 0, General_Format, "RIFF MMP");
}
//---------------------------------------------------------------------------
void File_Riff::RMP3()
{
Data_Accept("RMP3");
Element_Name("RMP3");
//Filling
Fill(Stream_General, 0, General_Format, "RMP3");
Kind=Kind_Rmp3;
}
//---------------------------------------------------------------------------
void File_Riff::RMP3_data()
{
Element_Name("Raw datas");
Fill(Stream_Audio, 0, Audio_StreamSize, Buffer_DataToParse_End-Buffer_DataToParse_Begin);
Stream_Prepare(Stream_Audio);
//Creating parser
#if defined(MEDIAINFO_MPEGA_YES)
File_Mpega* Parser=new File_Mpega;
Parser->CalculateDelay=true;
Parser->ShouldContinueParsing=true;
Open_Buffer_Init(Parser);
stream& StreamItem=Stream[(int32u)-1];
StreamItem.StreamKind=Stream_Audio;
StreamItem.StreamPos=0;
StreamItem.Parsers.push_back(Parser);
#else //MEDIAINFO_MPEG4_YES
Fill(Stream_Audio, 0, Audio_Format, "MPEG Audio");
Skip_XX(Buffer_DataToParse_End-Buffer_DataToParse_Begin, "Data");
#endif
}
//---------------------------------------------------------------------------
void File_Riff::RMP3_data_Continue()
{
#if MEDIAINFO_DEMUX
if (Element_Size)
{
Demux_random_access=true;
Demux(Buffer+Buffer_Offset, (size_t)Element_Size, ContentType_MainStream);
}
#endif //MEDIAINFO_DEMUX
Element_Code=(int64u)-1;
AVI__movi_xxxx();
}
//---------------------------------------------------------------------------
void File_Riff::SMV0()
{
Accept("SMV");
//Parsing
int8u Version;
Skip_C1( "Identifier (continuing)");
Get_C1 (Version, "Version");
Skip_C3( "Identifier (continuing)");
if (Version=='1')
{
int32u Width, Height, FrameRate, BlockSize, FrameCount;
Get_B3 (Width, "Width");
Get_B3 (Height, "Height");
Skip_B3( "0x000010");
Skip_B3( "0x000001");
Get_B3 (BlockSize, "Block size");
Get_B3 (FrameRate, "Frame rate");
Get_B3 (FrameCount, "Frame count");
Skip_B3( "0x000000");
Skip_B3( "0x000000");
Skip_B3( "0x000000");
Skip_B3( "0x010101");
Skip_B3( "0x010101");
Skip_B3( "0x010101");
Skip_B3( "0x010101");
//Filling
Fill(Stream_General, 0, General_Format_Profile, "SMV v1");
Stream_Prepare(Stream_Video);
Fill(Stream_Video, 0, Video_MuxingMode, "SMV v1");
Fill(Stream_Video, 0, Video_Width, Width);
Fill(Stream_Video, 0, Video_Height, Height);
Fill(Stream_Video, 0, Video_FrameRate, (float)FrameRate);
Fill(Stream_Video, 0, Video_FrameCount, FrameCount);
Finish("SMV");
}
else if (Version=='2')
{
int32u Width, Height, FrameRate;
Get_L3 (Width, "Width");
Get_L3 (Height, "Height");
Skip_L3( "0x000010");
Skip_L3( "0x000001");
Get_L3 (SMV_BlockSize, "Block size");
Get_L3 (FrameRate, "Frame rate");
Get_L3 (SMV_FrameCount, "Frame count");
Skip_L3( "0x000001");
Skip_L3( "0x000000");
Skip_L3( "Frame rate");
Skip_L3( "0x010101");
Skip_L3( "0x010101");
Skip_L3( "0x010101");
Skip_L3( "0x010101");
//Filling
SMV_BlockSize+=3;
SMV_FrameCount++;
Fill(Stream_General, 0, General_Format_Profile, "SMV v2");
Stream_Prepare(Stream_Video);
Fill(Stream_Video, 0, Video_Format, "JPEG");
Fill(Stream_Video, 0, Video_Codec, "JPEG");
Fill(Stream_Video, 0, Video_MuxingMode, "SMV v2");
Fill(Stream_Video, 0, Video_Width, Width);
Fill(Stream_Video, 0, Video_Height, Height);
Fill(Stream_Video, 0, Video_FrameRate, FrameRate);
Fill(Stream_Video, 0, Video_FrameCount, SMV_FrameCount);
Fill(Stream_Video, 0, Video_StreamSize, SMV_BlockSize*SMV_FrameCount);
}
else
Finish("SMV");
}
//---------------------------------------------------------------------------
void File_Riff::SMV0_xxxx()
{
//Parsing
int32u Size;
Get_L3 (Size, "Size");
#if defined(MEDIAINFO_JPEG_YES)
//Creating the parser
File_Jpeg MI;
Open_Buffer_Init(&MI);
//Parsing
Open_Buffer_Continue(&MI, Size);
//Filling
Finish(&MI);
Merge(MI, Stream_Video, 0, StreamPos_Last);
//Positioning
Element_Offset+=Size;
#else
//Parsing
Skip_XX(Size, "JPEG data");
#endif
Skip_XX(Element_Size-Element_Offset, "Padding");
//Filling
#if MEDIAINFO_HASH
if (Config->File_Hash_Get().to_ulong())
Element_Offset=Element_Size+(SMV_FrameCount-1)*SMV_BlockSize;
#endif //MEDIAINFO_HASH
Data_GoTo(File_Offset+Buffer_Offset+(size_t)Element_Size+(SMV_FrameCount-1)*SMV_BlockSize, "SMV");
SMV_BlockSize=0;
}
//---------------------------------------------------------------------------
void File_Riff::WAVE()
{
Data_Accept("Wave");
Element_Name("Wave");
//Filling
Fill(Stream_General, 0, General_Format, "Wave");
Kind=Kind_Wave;
#if MEDIAINFO_EVENTS
StreamIDs_Width[0]=0;
#endif //MEDIAINFO_EVENTS
}
//---------------------------------------------------------------------------
void File_Riff::WAVE__pmx()
{
Element_Name("XMP");
//Parsing
Ztring XML_Data;
Get_Local(Element_Size, XML_Data, "XML data");
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_aXML()
{
Element_Name("aXML");
//Parsing
Skip_Local(Element_Size, "XML data");
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_bext()
{
Element_Name("Broadcast extension");
//Parsing
Ztring Description, Originator, OriginatorReference, OriginationDate, OriginationTime, History;
int16u Version;
Get_Local(256, Description, "Description");
Get_Local( 32, Originator, "Originator");
Get_Local( 32, OriginatorReference, "OriginatorReference");
Get_Local( 10, OriginationDate, "OriginationDate");
Get_Local( 8, OriginationTime, "OriginationTime");
Get_L8 ( TimeReference, "TimeReference"); //To be divided by SamplesPerSec
Get_L2 ( Version, "Version");
if (Version==1)
Skip_UUID( "UMID");
Skip_XX (602-Element_Offset, "Reserved");
if (Element_Offset<Element_Size)
Get_Local(Element_Size-Element_Offset, History, "History");
FILLING_BEGIN();
Fill(Stream_General, 0, General_Description, Description);
Fill(Stream_General, 0, General_Producer, Originator);
Fill(Stream_General, 0, "Producer_Reference", OriginatorReference);
Fill(Stream_General, 0, General_Encoded_Date, OriginationDate+__T(' ')+OriginationTime);
Fill(Stream_General, 0, General_Encoded_Library_Settings, History);
if (SamplesPerSec && TimeReference!=(int64u)-1)
{
Fill(Stream_Audio, 0, Audio_Delay, float64_int64s(((float64)TimeReference)*1000/SamplesPerSec));
Fill(Stream_Audio, 0, Audio_Delay_Source, "Container (bext)");
}
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_cue_()
{
Element_Name("Cue points");
//Parsing
int32u numCuePoints;
Get_L4(numCuePoints, "numCuePoints");
for (int32u Pos=0; Pos<numCuePoints; Pos++)
{
Element_Begin1("Cue point");
Skip_L4( "ID");
Skip_L4( "Position");
Skip_C4( "DataChunkID");
Skip_L4( "ChunkStart");
Skip_L4( "BlockStart");
Skip_L4( "SampleOffset");
Element_End0();
}
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_data()
{
Element_Name("Raw datas");
if (Buffer_DataToParse_End-Buffer_DataToParse_Begin<100)
{
Skip_XX(Buffer_DataToParse_End-Buffer_Offset, "Unknown");
        return; //This may be embedded in another container, and there is only the header (what is the junk?)
}
FILLING_BEGIN();
Fill(Stream_Audio, 0, Audio_StreamSize, Buffer_DataToParse_End-Buffer_DataToParse_Begin);
FILLING_END();
//Parsing
Element_Code=(int64u)-1;
FILLING_BEGIN();
int64u Duration=Retrieve(Stream_Audio, 0, Audio_Duration).To_int64u();
int64u BitRate=Retrieve(Stream_Audio, 0, Audio_BitRate).To_int64u();
if (Duration)
{
int64u BitRate_New=(Buffer_DataToParse_End-Buffer_DataToParse_Begin)*8*1000/Duration;
if (BitRate_New<BitRate*0.95 || BitRate_New>BitRate*1.05)
Fill(Stream_Audio, 0, Audio_BitRate, BitRate_New, 10, true); //Correcting the bitrate, it was false in the header
}
else if (BitRate)
{
if (IsSub)
//Retrieving "data" real size, in case of truncated files and/or wave header in another container
Duration=((int64u)LittleEndian2int32u(Buffer+Buffer_Offset-4))*8*1000/BitRate; //TODO: RF64 is not handled
else
Duration=(Buffer_DataToParse_End-Buffer_DataToParse_Begin)*8*1000/BitRate;
Fill(Stream_General, 0, General_Duration, Duration, 10, true);
Fill(Stream_Audio, 0, Audio_Duration, Duration, 10, true);
}
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_data_Continue()
{
#if MEDIAINFO_DEMUX
Element_Code=(int64u)-1;
if (AvgBytesPerSec && Demux_Rate)
{
FrameInfo.DTS=float64_int64s((File_Offset+Buffer_Offset-Buffer_DataToParse_Begin)*1000000000.0/AvgBytesPerSec);
FrameInfo.PTS=FrameInfo.DTS;
Frame_Count_NotParsedIncluded=float64_int64s(((float64)FrameInfo.DTS)/1000000000.0*Demux_Rate);
}
Demux_random_access=true;
Demux(Buffer+Buffer_Offset, (size_t)Element_Size, ContentType_MainStream);
Frame_Count_NotParsedIncluded=(int64u)-1;
#endif //MEDIAINFO_DEMUX
Element_Code=(int64u)-1;
AVI__movi_xxxx();
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_ds64()
{
Element_Name("DataSize64");
//Parsing
int32u tableLength;
Skip_L8( "riffSize"); //Is directly read from the header parser
Get_L8 (WAVE_data_Size, "dataSize");
Get_L8 (WAVE_fact_samplesCount, "sampleCount");
Get_L4 (tableLength, "tableLength");
for (int32u Pos=0; Pos<tableLength; Pos++)
Skip_L8( "table[]");
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_fact()
{
Element_Name("Sample count");
//Parsing
int64u SamplesCount64;
int32u SamplesCount;
Get_L4 (SamplesCount, "SamplesCount");
SamplesCount64=SamplesCount;
if (SamplesCount64==0xFFFFFFFF)
SamplesCount64=WAVE_fact_samplesCount;
FILLING_BEGIN();
int32u SamplingRate=Retrieve(Stream_Audio, 0, Audio_SamplingRate).To_int32u();
if (SamplingRate)
{
//Calculating
int64u Duration=(SamplesCount64*1000)/SamplingRate;
//Coherency test
bool IsOK=true;
if (File_Size!=(int64u)-1)
{
int64u BitRate=Retrieve(Stream_Audio, 0, Audio_BitRate).To_int64u();
if (BitRate)
{
int64u Duration_FromBitRate = File_Size * 8 * 1000 / BitRate;
if (Duration_FromBitRate > Duration*1.10 || Duration_FromBitRate < Duration*0.9)
IsOK = false;
}
}
//Filling
if (IsOK)
Fill(Stream_Audio, 0, Audio_Duration, Duration);
}
FILLING_END();
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_fmt_()
{
//Compute the current codec ID
Element_Code=(int64u)-1;
Stream_ID=(int32u)-1;
stream_Count=1;
Stream[(int32u)-1].fccType=Elements::AVI__hdlr_strl_strh_auds;
AVI__hdlr_strl_strf();
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_ID3_()
{
Element_Name("ID3v2 tags");
//Parsing
#if defined(MEDIAINFO_ID3V2_YES)
File_Id3v2 MI;
Open_Buffer_Init(&MI);
Open_Buffer_Continue(&MI);
Finish(&MI);
Merge(MI, Stream_General, 0, 0);
#endif
}
//---------------------------------------------------------------------------
void File_Riff::WAVE_iXML()
{
Element_Name("iXML");
//Parsing
Skip_Local(Element_Size, "XML data");
}
//---------------------------------------------------------------------------
void File_Riff::wave()
{
Data_Accept("Wave64");
Element_Name("Wave64");
//Filling
Fill(Stream_General, 0, General_Format, "Wave64");
}
//---------------------------------------------------------------------------
void File_Riff::W3DI()
{
Element_Name("IDVX tags (Out of specs!)");
//Parsing
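    //Fields are stored as NUL-separated local strings: Title, Artist, Album, Unknown, Genre, Comment, then a 32-bit track position.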
int32u Size=(int32u)Element_Size;
Ztring Title, Artist, Album, Unknown, Genre, Comment;
int32u TrackPos;
Get_Local(Size, Title, "Title");
Element_Offset=(int32u)Title.size();
Size-=(int32u)Title.size();
if (Size==0) return;
Skip_L1( "Zero"); Size--; //NULL char
Get_Local(Size, Artist, "Artist");
Element_Offset=(int32u)Title.size()+1+(int32u)Artist.size();
Size-=(int32u)Artist.size();
if (Size==0) return;
Skip_L1( "Zero"); Size--; //NULL char
Get_Local(Size, Album, "Album");
Element_Offset=(int32u)Title.size()+1+(int32u)Artist.size()+1+(int32u)Album.size();
Size-=(int32u)Album.size();
if (Size==0) return;
Skip_L1( "Zero"); Size--; //NULL char
Get_Local(Size, Unknown, "Unknown");
Element_Offset=(int32u)Title.size()+1+(int32u)Artist.size()+1+(int32u)Album.size()+1+(int32u)Unknown.size();
Size-=(int32u)Unknown.size();
if (Size==0) return;
Skip_L1( "Zero"); Size--; //NULL char
Get_Local(Size, Genre, "Genre");
Element_Offset=(int32u)Title.size()+1+(int32u)Artist.size()+1+(int32u)Album.size()+1+(int32u)Unknown.size()+1+(int32u)Genre.size();
Size-=(int32u)Genre.size();
if (Size==0) return;
Skip_L1( "Zero"); Size--; //NULL char
Get_Local(Size, Comment, "Comment");
Element_Offset=(int32u)Title.size()+1+(int32u)Artist.size()+1+(int32u)Album.size()+1+(int32u)Unknown.size()+1+(int32u)Genre.size()+1+(int32u)Comment.size();
Size-=(int32u)Comment.size();
if (Size==0) return;
Skip_L1( "Zero"); Size--; //NULL char
Get_L4 (TrackPos, "Track_Position");
if(Element_Offset+8<Element_Size)
Skip_XX(Element_Size-Element_Offset, "Unknown");
Element_Begin1("Footer");
Skip_L4( "Size");
Skip_C4( "Name");
Element_End0();
//Filling
Fill(Stream_General, 0, General_Track, Title);
Fill(Stream_General, 0, General_Performer, Artist);
Fill(Stream_General, 0, General_Album, Album);
Fill(Stream_General, 0, "Unknown", Unknown);
Fill(Stream_General, 0, General_Genre, Genre);
Fill(Stream_General, 0, General_Comment, Comment);
Fill(Stream_General, 0, General_Track_Position, TrackPos);
}
void File_Riff::Open_Buffer_Init_All()
{
stream& StreamItem = Stream[Stream_ID];
for (size_t Pos = 0; Pos<StreamItem.Parsers.size(); Pos++)
Open_Buffer_Init(StreamItem.Parsers[Pos]);
}
//***************************************************************************
// C++
//***************************************************************************
} //NameSpace
#endif //MEDIAINFO_RIFF_YES<|fim▁end|> | {
Element_Name("Google Metadatas");
//Parsing |
<|file_name|>windows.py<|end_file_name|><|fim▁begin|>from __future__ import division
from PyQt5 import QtCore, QtWidgets
from pycho.gui.widgets import GLPlotWidget
from pycho.world.navigation import DIRECTIONS
from pycho.gui.interaction import QT_KEYS, is_left, is_right, is_up, is_down
from pycho.world.helpers import box_around
import logging
<|fim▁hole|>#hence why they are used here
class DefaultWindow(QtWidgets.QMainWindow):
def __init__(self, game,
key_press_handler=None,
mouse_click_handler=None,
mouse_release_handler=None,
tick_time=0,
width=600,
height=400,
key_press_handlers=None,
mouse_click_handlers=None,
mouse_release_handlers=None):
super(DefaultWindow, self).__init__()
self.game = game
self.widget = GLPlotWidget(100, 100, self.game)
self.widget.setGeometry(0, 0, self.widget.width, self.widget.height)
self.setCentralWidget(self.widget)
self.show()
self.paint_timer = QtCore.QBasicTimer()
self.clean_timer = QtCore.QBasicTimer()
self.tick_timer = QtCore.QBasicTimer()
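        # One callback per timer, dispatched by timerEvent: repaint, world clean-up and game tick.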
self.callbacks = [self.widget.updateGL, self.game.world.clean_up, self.game.world.tick]
QtCore.QMetaObject.connectSlotsByName(self)
self.paint_timer.start(30, self)
self.clean_timer.start(40, self)
self.timers = [self.paint_timer, self.clean_timer]
self.timer_times = [30, 40]
if tick_time != TURN_BASED:
self.tick_timer.start(tick_time, self)
self.timers.append(self.tick_timer)
self.timer_times.append(tick_time)
self.resize(width, height)
if key_press_handler is None:
key_press_handler = lambda self, event: self._defaultKeyPressHandler(event)
if mouse_click_handler is None:
mouse_click_handler = lambda self, event: self._defaultMousePressHandler(event)
if mouse_release_handler is None:
mouse_release_handler = lambda *a, **kw: None
if key_press_handlers is None:
key_press_handlers = {'*' : key_press_handler}
if mouse_click_handlers is None:
mouse_click_handlers = {'*' : mouse_click_handler}
if mouse_release_handlers is None:
mouse_release_handlers = {'*' : mouse_release_handler}
self.key_press_handlers = key_press_handlers
self.mouse_click_handlers = mouse_click_handlers
self.mouse_release_handlers = mouse_release_handlers
self.is_paused = False
def timerEvent(self, event):
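        # QBasicTimer ids are assumed to be assigned sequentially starting at 1,
        # matching the order in which the timers were started (paint, clean, tick).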
self.callbacks[event.timerId() - 1]()
def _defaultKeyPressHandler(self, event):
key = event.key()
logging.debug('Key {} was pressed'.format(key))
if is_left(key):
face_movement = DIRECTIONS['left']
elif is_right(key):
face_movement = DIRECTIONS['right']
elif is_up(key):
face_movement = DIRECTIONS['up']
elif is_down(key):
face_movement = DIRECTIONS['down']
elif key == QT_KEYS['Space']:
face_movement = DIRECTIONS['still']
else:
return
logging.debug('Face movement set to {}'.format(face_movement))
logging.debug('Player is facing {}'.format(self.game.player.facing))
self.game.player.facing = face_movement
self.game.world.tick()
def map_point_to_game_world(self, x, y):
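        # Qt widget coordinates have their origin at the top-left, while the game
        # world uses a bottom-left origin, so the y axis is flipped here.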
i = int((x / self.widget.width) * self.game.world.width)
j = int(((self.widget.height - y) / self.widget.height) * self.game.world.height)
return (i, j)
def _current_handler(self, handlers):
level_id = self.game.world.current_level.id
if level_id not in handlers:
try:
return handlers['*']
except KeyError:
logging.error('No default handler set as *!')
return handlers[level_id]
def _defaultMousePressHandler(self, event, pointer_size=5):
x, y = self.map_point_to_game_world(event.x(), event.y())
        # gradually grow the pointer to allow for
        # greater control over what is clicked
for j in xrange(pointer_size):
try:
obj = self.game.world.colliding_object(None,
box_around(x, y, j, j))
except:
break
if obj is not None:
logging.error(obj)
break
else:
logging.error("Nothing found!")
def pause(self):
for timer in self.timers:
            timer.stop()
self.is_paused = True
def unpause(self):
for timer, time in zip(self.timers, self.timer_times):
            timer.start(time)
self.is_paused = False
def keyPressEvent(self, event):
self._current_handler(self.key_press_handlers)(self, event)
def mousePressEvent(self, event):
self._current_handler(self.mouse_click_handlers)(self, event)
def mouseReleaseEvent(self, event):
self._current_handler(self.mouse_release_handlers)(self, event)
def closeEvent(self, event):
logging.debug("Dumping to text file")
self.game.world.mind_dump()<|fim▁end|> | xrange = range
TURN_BASED = 0
#Qt uses camelCase for naming methods, |
<|file_name|>v1.ts<|end_file_name|><|fim▁begin|>/**
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {AxiosPromise} from 'axios';
import {Compute, JWT, OAuth2Client, UserRefreshClient} from 'google-auth-library';
import {BodyResponseCallback, createAPIRequest, GlobalOptions, GoogleConfigurable, MethodOptions} from 'googleapis-common';
// tslint:disable: no-any
// tslint:disable: class-name
// tslint:disable: variable-name
// tslint:disable: jsdoc-format
// tslint:disable: no-namespace
export namespace acceleratedmobilepageurl_v1 {
export interface Options extends GlobalOptions {
version: 'v1';
}
/**
* Accelerated Mobile Pages (AMP) URL API
*
* Retrieves the list of AMP URLs (and equivalent AMP Cache URLs) for a given
* list of public URL(s).
*
* @example
* const {google} = require('googleapis');
* const acceleratedmobilepageurl = google.acceleratedmobilepageurl('v1');
*
* @namespace acceleratedmobilepageurl
* @type {Function}
* @version v1
* @variation v1
* @param {object=} options Options for Acceleratedmobilepageurl
*/
export class Acceleratedmobilepageurl {
_options: GlobalOptions;
google?: GoogleConfigurable;
root = this;
ampUrls: Resource$Ampurls;
constructor(options: GlobalOptions, google?: GoogleConfigurable) {
this._options = options || {};
this.google = google;
this.getRoot.bind(this);
this.ampUrls = new Resource$Ampurls(this);
}
getRoot() {
return this.root;
}
}
/**
* AMP URL response for a requested URL.
*/
export interface Schema$AmpUrl {
/**
* The AMP URL pointing to the publisher's web server.
*/
ampUrl?: string;
/**
* The [AMP Cache URL](/amp/cache/overview#amp-cache-url-format) pointing to
* the cached document in the Google AMP Cache.
*/
cdnAmpUrl?: string;
/**
* The original non-AMP URL.
*/
originalUrl?: string;
}
/**
* AMP URL Error resource for a requested URL that couldn't be found.
*/
export interface Schema$AmpUrlError {
/**
* The error code of an API call.
*/
errorCode?: string;
/**
* An optional descriptive error message.
*/
errorMessage?: string;
/**
* The original non-AMP URL.
*/
originalUrl?: string;
}
/**
* AMP URL request for a batch of URLs.
*/
export interface Schema$BatchGetAmpUrlsRequest {
/**
* The lookup_strategy being requested.
*/
lookupStrategy?: string;
/**
* List of URLs to look up for the paired AMP URLs. The URLs are
* case-sensitive. Up to 50 URLs per lookup (see [Usage
* Limits](/amp/cache/reference/limits)).
*/
urls?: string[];
}
/**
* Batch AMP URL response.
*/
export interface Schema$BatchGetAmpUrlsResponse {
/**
* For each URL in BatchAmpUrlsRequest, the URL response. The response might
* not be in the same order as URLs in the batch request. If
* BatchAmpUrlsRequest contains duplicate URLs, AmpUrl is generated only
* once.
*/
ampUrls?: Schema$AmpUrl[];
/**
* The errors for requested URLs that have no AMP URL.
*/
urlErrors?: Schema$AmpUrlError[];
}
export class Resource$Ampurls {
root: Acceleratedmobilepageurl;
constructor(root: Acceleratedmobilepageurl) {
this.root = root;
this.getRoot.bind(this);
}
getRoot() {
return this.root;
}
/**
* acceleratedmobilepageurl.ampUrls.batchGet
* @desc Returns AMP URL(s) and equivalent [AMP Cache
* URL(s)](/amp/cache/overview#amp-cache-url-format).
* @alias acceleratedmobilepageurl.ampUrls.batchGet
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {().BatchGetAmpUrlsRequest} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
batchGet(
params?: Params$Resource$Ampurls$Batchget,
options?: MethodOptions): AxiosPromise<Schema$BatchGetAmpUrlsResponse>;
batchGet(
params: Params$Resource$Ampurls$Batchget,
options: MethodOptions|
BodyResponseCallback<Schema$BatchGetAmpUrlsResponse>,
callback: BodyResponseCallback<Schema$BatchGetAmpUrlsResponse>): void;
batchGet(
params: Params$Resource$Ampurls$Batchget,
callback: BodyResponseCallback<Schema$BatchGetAmpUrlsResponse>): void;
batchGet(callback: BodyResponseCallback<Schema$BatchGetAmpUrlsResponse>):
void;
batchGet(
paramsOrCallback?: Params$Resource$Ampurls$Batchget|
BodyResponseCallback<Schema$BatchGetAmpUrlsResponse>,
optionsOrCallback?: MethodOptions|
BodyResponseCallback<Schema$BatchGetAmpUrlsResponse>,
callback?: BodyResponseCallback<Schema$BatchGetAmpUrlsResponse>):
void|AxiosPromise<Schema$BatchGetAmpUrlsResponse> {
let params = (paramsOrCallback || {}) as Params$Resource$Ampurls$Batchget;
let options = (optionsOrCallback || {}) as MethodOptions;
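    // Support both call styles: batchGet(params, options?, callback?) and batchGet(callback).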
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Ampurls$Batchget;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://acceleratedmobilepageurl.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/ampUrls:batchGet')
.replace(/([^:]\/)\/+/g, '$1'),
method: 'POST'
},
options),
params,
requiredParams: [],
pathParams: [],
context: this.getRoot()
};
if (callback) {
createAPIRequest<Schema$BatchGetAmpUrlsResponse>(parameters, callback);
} else {
return createAPIRequest<Schema$BatchGetAmpUrlsResponse>(parameters);
}
}
}
export interface Params$Resource$Ampurls$Batchget {
/**
* Auth client or API Key for the request
*/
auth?: string|OAuth2Client|JWT|Compute|UserRefreshClient;
<|fim▁hole|> * Request body metadata
*/
requestBody?: Schema$BatchGetAmpUrlsRequest;
}
}<|fim▁end|> |
/** |
<|file_name|>bitcoin_sr.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="sr" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Fuguecoin</source>
<translation>О Fuguecoin-у</translation>
</message>
<message>
<location line="+39"/>
<source><b>Fuguecoin</b> version</source>
<translation><b>Fuguecoin</b> верзија</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The Fuguecoin developers</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Адресар</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Кликните два пута да промените адресу и/или етикету</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Прави нову адресу</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Копира изабрану адресу на системски клипборд</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Нова адреса</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Fuguecoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Ово су Ваше Fuguecoin адресе за примање уплата. Можете да сваком пошиљаоцу дате другачију адресу да би пратили ко је вршио уплате.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Prikaži &QR kod</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Fuguecoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Fuguecoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Избриши</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Fuguecoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>Извоз података из адресара</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Зарезом одвојене вредности (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Грешка током извоза</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Није могуће писати у фајл %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Етикета</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Адреса</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(без етикете)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Унесите лозинку</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Нова лозинка</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Поновите нову лозинку</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Унесите нову лозинку за приступ новчанику.<br/>Молимо Вас да лозинка буде <b>10 или више насумице одабраних знакова</b>, или <b>осам или више речи</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Шифровање новчаника</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Ова акција захтева лозинку Вашег новчаника да би га откључала.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Откључавање новчаника</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Ова акција захтева да унесете лозинку да би дешифловала новчаник.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Дешифровање новчаника</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Промена лозинке</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Унесите стару и нову лозинку за шифровање новчаника.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Одобрите шифровање новчаника</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR BITCOINS</b>!</source>
<translation>Упозорење: Ако се ваш новчаник шифрује а потом изгубите лозинкзу, ви ћете <b>ИЗГУБИТИ СВЕ BITCOIN-Е</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Да ли сте сигурни да желите да се новчаник шифује?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Новчаник је шифрован</translation>
</message>
<message>
<location line="-56"/>
<source>Fuguecoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your bitcoins from being stolen by malware infecting your computer.</source>
<translation>Fuguecoin će se sad zatvoriti da bi završio proces enkripcije. Zapamti da enkripcija tvog novčanika ne može u potpunosti da zaštiti tvoje bitcoine da ne budu ukradeni od malawarea koji bi inficirao tvoj kompjuter.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Неуспело шифровање новчаника</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Настала је унутрашња грешка током шифровања новчаника. Ваш новчаник није шифрован.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>Лозинке које сте унели се не подударају.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>Неуспело откључавање новчаника</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Лозинка коју сте унели за откључавање новчаника је нетачна.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Неуспело дешифровање новчаника</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Лозинка за приступ новчанику је успешно промењена.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>Синхронизација са мрежом у току...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&Општи преглед</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Погледајте општи преглед новчаника</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&Трансакције</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Претражите историјат трансакција</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Уредите запамћене адресе и њихове етикете</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Прегледајте листу адреса на којима прихватате уплате</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>I&zlaz</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Напустите програм</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Fuguecoin</source>
<translation>Прегледајте информације о Fuguecoin-у</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>О &Qt-у</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Прегледајте информације о Qt-у</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>П&оставке...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Шифровање новчаника...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Backup новчаника</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>Промени &лозинку...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Fuguecoin address</source>
<translation>Пошаљите новац на bitcoin адресу</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Fuguecoin</source>
<translation>Изаберите могућности bitcoin-а</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Мењање лозинке којом се шифрује новчаник</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Fuguecoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>новчаник</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>&About Fuguecoin</source>
<translation>&О Fuguecoin-у</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Fuguecoin addresses to prove you own them</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Fuguecoin addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&Фајл</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&Подешавања</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>П&омоћ</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Трака са картицама</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>Fuguecoin client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Fuguecoin network</source>
<translation><numerusform>%n активна веза са Fuguecoin мрежом</numerusform><numerusform>%n активне везе са Fuguecoin мрежом</numerusform><numerusform>%n активних веза са Fuguecoin мрежом</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>Ажурно</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>Ажурирање у току...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Послана трансакција</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Придошла трансакција</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Datum: %1⏎ Iznos: %2⏎ Tip: %3⏎ Adresa: %4⏎</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Fuguecoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Новчаник јс <b>шифрован</b> и тренутно <b>откључан</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Новчаник јс <b>шифрован</b> и тренутно <b>закључан</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Fuguecoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Измени адресу</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Етикета</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Адреса</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Унешена адреса "%1" се већ налази у адресару.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Fuguecoin address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Немогуће откључати новчаник.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Fuguecoin-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>верзија</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Korišćenje:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Поставке</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Fuguecoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Fuguecoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Fuguecoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Connect to the Fuguecoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Fuguecoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&Јединица за приказивање износа:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Whether to show Fuguecoin addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Fuguecoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Форма</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Fuguecoin network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Непотврђено:</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>новчаник</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Недавне трансакције</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start bitcoin: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Zatraži isplatu</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Iznos:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>&Етикета</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Poruka:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Snimi kao...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Fuguecoin-Qt help message to get a list with possible Fuguecoin command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-104"/>
<source>Fuguecoin - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Fuguecoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Open the Fuguecoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Fuguecoin RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Слање новца</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Ukloni sva polja sa transakcijama</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Потврди акцију слања</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&Пошаљи</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Да ли сте сигурни да желите да пошаљете %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation>и</translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Форма</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Етикета</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Izaberite adresu iz adresara</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Fuguecoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>Unesite Fuguecoin adresu (n.pr. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Fuguecoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Fuguecoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Fuguecoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>Unesite Fuguecoin adresu (n.pr. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Enter Fuguecoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Fuguecoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>Otvorite do %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/nepotvrdjeno</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 potvrde</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>datum</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>етикета</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>iznos</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, nije još uvek uspešno emitovan</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>nepoznato</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>detalji transakcije</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Ovaj odeljak pokazuje detaljan opis transakcije</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>datum</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>tip</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Адреса</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>iznos</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>Otvoreno do %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>Offline * van mreže (%1 potvrdjenih)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>Nepotvrdjeno (%1 of %2 potvrdjenih)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Potvrdjena (%1 potvrdjenih)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Ovaj blok nije primljen od ostalih čvorova (nodova) i verovatno neće biti prihvaćen!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Generisan ali nije prihvaćen</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Primljen sa</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Primljeno od</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Poslat ka</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Isplata samom sebi</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Minirano</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Status vaše transakcije. Predjite mišem preko ovog polja da bi ste videli broj konfirmacija</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Datum i vreme primljene transakcije.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Tip transakcije</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Destinacija i adresa transakcije</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Iznos odbijen ili dodat balansu.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Sve</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Danas</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>ove nedelje</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Ovog meseca</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Prošlog meseca</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Ove godine</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Opseg...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Primljen sa</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Poslat ka</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Vama - samom sebi</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Minirano</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Drugi</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Navedite adresu ili naziv koji bi ste potražili</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Min iznos</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>kopiraj adresu</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>kopiraj naziv</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>kopiraj iznos</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>promeni naziv</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>Izvezi podatke o transakcijama</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Зарезом одвојене вредности (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Potvrdjen</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>datum</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>tip</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Етикета</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Адреса</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>iznos</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Грешка током извоза</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Није могуће писати у фајл %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Opseg:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>do</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>Слање новца</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Fuguecoin version</source>
<translation>Fuguecoin верзија</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>Korišćenje:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or bitcoind</source>
        <translation>Pošalji naredbu na -server ili bitcoind
</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>Listaj komande</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>Zatraži pomoć za komande</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>Opcije</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: bitcoin.conf)</source>
<translation>Potvrdi željeni konfiguracioni fajl (podrazumevani:bitcoin.conf)</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: bitcoind.pid)</source>
<translation>Konkretizuj pid fajl (podrazumevani: bitcoind.pid)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Gde je konkretni data direktorijum </translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 8333 or testnet: 18333)</source>
<translation>Slušaj konekcije na <port> (default: 8333 or testnet: 18333)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Održavaj najviše <n> konekcija po priključku (default: 125)
</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 8332 or testnet: 18332)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Prihvati komandnu liniju i JSON-RPC komande</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Radi u pozadini kao daemon servis i prihvati komande</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>Koristi testnu mrežu</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=bitcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Fuguecoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/><|fim▁hole|> <message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Fuguecoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Fuguecoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Fuguecoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>Korisničko ime za JSON-RPC konekcije</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>Lozinka za JSON-RPC konekcije</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Dozvoli JSON-RPC konekcije sa posebne IP adrese</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Pošalji komande to nodu koji radi na <ip> (default: 127.0.0.1)</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Odredi veličinu zaštićenih ključeva na <n> (default: 100)</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Ponovo skeniraj lanac blokova za nedostajuće transakcije iz novčanika</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Koristi OpenSSL (https) za JSON-RPC konekcije</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>privatni ključ za Server (podrazumevan: server.pem)</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Prihvatljive cifre (podrazumevano: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>Ova poruka Pomoći</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>učitavam adrese....</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Fuguecoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Fuguecoin to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Učitavam blok indeksa...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Fuguecoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>Новчаник се учитава...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>Ponovo skeniram...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>Završeno učitavanje</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"/>
</message>
</context>
</TS><|fim▁end|> | <source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message> |
<|file_name|>MARC21relaxed.py<|end_file_name|><|fim▁begin|># ./MARC21relaxed.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:5e592dacc0cf5bbbe827fb7d980f3324ca92c3dc
# Generated 2016-12-21 00:24:34.092428 by PyXB version 1.2.4 using Python 2.7.12.final.0
# Namespace http://www.loc.gov/MARC21/slim
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:773ffeee-c70b-11e6-9daf-00e1020040ea')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
#if pyxb.__version__ != _PyXBVersion:
# raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://www.loc.gov/MARC21/slim', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))<|fim▁hole|>
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
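# Usage sketch (illustrative, not part of the PyXB-generated bindings; the file name and
# variable names are assumptions, and it presumes the document root is a <record> element):
#
#   with open('record.xml', 'rb') as fh:
#       rec = CreateFromDocument(fh.read())
#   for df in rec.datafield:
#       print(df.tag, [(sf.code, sf.value()) for sf in df.subfield])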
# Atomic simple type: {http://www.loc.gov/MARC21/slim}recordTypeType
class recordTypeType (pyxb.binding.datatypes.NMTOKEN, pyxb.binding.basis.enumeration_mixin):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'recordTypeType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 63, 2)
_Documentation = None
recordTypeType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=recordTypeType, enum_prefix=None)
recordTypeType.Bibliographic = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Bibliographic', tag='Bibliographic')
recordTypeType.Authority = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Authority', tag='Authority')
recordTypeType.Holdings = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Holdings', tag='Holdings')
recordTypeType.Classification = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Classification', tag='Classification')
recordTypeType.Community = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Community', tag='Community')
recordTypeType._InitializeFacetMap(recordTypeType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'recordTypeType', recordTypeType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}leaderDataType
class leaderDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'leaderDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 82, 2)
_Documentation = None
leaderDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
leaderDataType._CF_pattern.addPattern(pattern='[\\dA-Za-z\\.| ]{24}')
leaderDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
leaderDataType._InitializeFacetMap(leaderDataType._CF_pattern,
leaderDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'leaderDataType', leaderDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}controlDataType
class controlDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'controlDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 99, 2)
_Documentation = None
controlDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
controlDataType._InitializeFacetMap(controlDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'controlDataType', controlDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}controltagDataType
class controltagDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'controltagDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 104, 2)
_Documentation = None
controltagDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
controltagDataType._CF_pattern.addPattern(pattern='[0-9A-Za-z]{3}')
controltagDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
controltagDataType._InitializeFacetMap(controltagDataType._CF_pattern,
controltagDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'controltagDataType', controltagDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}tagDataType
class tagDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tagDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 122, 2)
_Documentation = None
tagDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
tagDataType._CF_pattern.addPattern(pattern='(0([0-9A-Z][0-9A-Z])|0([1-9a-z][0-9a-z]))|(([1-9A-Z][0-9A-Z]{2})|([1-9a-z][0-9a-z]{2}))')
tagDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
tagDataType._InitializeFacetMap(tagDataType._CF_pattern,
tagDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'tagDataType', tagDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}indicatorDataType
class indicatorDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'indicatorDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 128, 2)
_Documentation = None
indicatorDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
indicatorDataType._CF_pattern.addPattern(pattern='[\\da-zA-Z_ ]{1}')
indicatorDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
indicatorDataType._InitializeFacetMap(indicatorDataType._CF_pattern,
indicatorDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'indicatorDataType', indicatorDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}subfieldDataType
class subfieldDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subfieldDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 142, 2)
_Documentation = None
subfieldDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
subfieldDataType._InitializeFacetMap(subfieldDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'subfieldDataType', subfieldDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}subfieldcodeDataType
class subfieldcodeDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subfieldcodeDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 147, 2)
_Documentation = None
subfieldcodeDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
subfieldcodeDataType._CF_pattern.addPattern(pattern='[\\dA-Za-z!"#$%&\'()*+,-./:;<=>?{}_^`~\\[\\]\\\\]{1}')
subfieldcodeDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
subfieldcodeDataType._InitializeFacetMap(subfieldcodeDataType._CF_pattern,
subfieldcodeDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'subfieldcodeDataType', subfieldcodeDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}idDataType
class idDataType (pyxb.binding.datatypes.ID):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'idDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 154, 2)
_Documentation = None
idDataType._InitializeFacetMap()
Namespace.addCategoryObject('typeBinding', 'idDataType', idDataType)
# Complex type {http://www.loc.gov/MARC21/slim}collectionType with content type ELEMENT_ONLY
class collectionType (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.loc.gov/MARC21/slim}collectionType with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'collectionType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 46, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.loc.gov/MARC21/slim}record uses Python identifier record
__record = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'record'), 'record', '__httpwww_loc_govMARC21slim_collectionType_httpwww_loc_govMARC21slimrecord', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 36, 2), )
record = property(__record.value, __record.set, None, 'record is a top level container element for all of the field elements which compose the record')
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_collectionType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 50, 4)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 50, 4)
id = property(__id.value, __id.set, None, None)
_ElementMap.update({
__record.name() : __record
})
_AttributeMap.update({
__id.name() : __id
})
Namespace.addCategoryObject('typeBinding', 'collectionType', collectionType)
# Complex type {http://www.loc.gov/MARC21/slim}recordType with content type ELEMENT_ONLY
class recordType (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.loc.gov/MARC21/slim}recordType with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'recordType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 52, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.loc.gov/MARC21/slim}leader uses Python identifier leader
__leader = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'leader'), 'leader', '__httpwww_loc_govMARC21slim_recordType_httpwww_loc_govMARC21slimleader', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 55, 8), )
leader = property(__leader.value, __leader.set, None, None)
# Element {http://www.loc.gov/MARC21/slim}controlfield uses Python identifier controlfield
__controlfield = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'controlfield'), 'controlfield', '__httpwww_loc_govMARC21slim_recordType_httpwww_loc_govMARC21slimcontrolfield', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 56, 8), )
controlfield = property(__controlfield.value, __controlfield.set, None, None)
# Element {http://www.loc.gov/MARC21/slim}datafield uses Python identifier datafield
__datafield = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'datafield'), 'datafield', '__httpwww_loc_govMARC21slim_recordType_httpwww_loc_govMARC21slimdatafield', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 57, 8), )
datafield = property(__datafield.value, __datafield.set, None, None)
# Attribute type uses Python identifier type
__type = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'type'), 'type', '__httpwww_loc_govMARC21slim_recordType_type', recordTypeType)
__type._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 60, 4)
__type._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 60, 4)
type = property(__type.value, __type.set, None, None)
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_recordType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 61, 4)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 61, 4)
id = property(__id.value, __id.set, None, None)
_ElementMap.update({
__leader.name() : __leader,
__controlfield.name() : __controlfield,
__datafield.name() : __datafield
})
_AttributeMap.update({
__type.name() : __type,
__id.name() : __id
})
Namespace.addCategoryObject('typeBinding', 'recordType', recordType)
# Complex type {http://www.loc.gov/MARC21/slim}leaderFieldType with content type SIMPLE
class leaderFieldType (pyxb.binding.basis.complexTypeDefinition):
"""MARC21 Leader, 24 bytes"""
_TypeDefinition = leaderDataType
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'leaderFieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 72, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is leaderDataType
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_leaderFieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 78, 8)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 78, 8)
id = property(__id.value, __id.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__id.name() : __id
})
Namespace.addCategoryObject('typeBinding', 'leaderFieldType', leaderFieldType)
# Complex type {http://www.loc.gov/MARC21/slim}controlFieldType with content type SIMPLE
class controlFieldType (pyxb.binding.basis.complexTypeDefinition):
"""MARC21 Fields 001-009"""
_TypeDefinition = controlDataType
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'controlFieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 88, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is controlDataType
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_controlFieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 94, 8)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 94, 8)
id = property(__id.value, __id.set, None, None)
# Attribute tag uses Python identifier tag
__tag = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tag'), 'tag', '__httpwww_loc_govMARC21slim_controlFieldType_tag', controltagDataType, required=True)
__tag._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 95, 8)
__tag._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 95, 8)
tag = property(__tag.value, __tag.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__id.name() : __id,
__tag.name() : __tag
})
Namespace.addCategoryObject('typeBinding', 'controlFieldType', controlFieldType)
# Complex type {http://www.loc.gov/MARC21/slim}dataFieldType with content type ELEMENT_ONLY
class dataFieldType (pyxb.binding.basis.complexTypeDefinition):
"""MARC21 Variable Data Fields 010-999"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'dataFieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 110, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.loc.gov/MARC21/slim}subfield uses Python identifier subfield
__subfield = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'subfield'), 'subfield', '__httpwww_loc_govMARC21slim_dataFieldType_httpwww_loc_govMARC21slimsubfield', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 115, 6), )
subfield = property(__subfield.value, __subfield.set, None, None)
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_dataFieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 117, 4)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 117, 4)
id = property(__id.value, __id.set, None, None)
# Attribute tag uses Python identifier tag
__tag = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tag'), 'tag', '__httpwww_loc_govMARC21slim_dataFieldType_tag', tagDataType, required=True)
__tag._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 118, 4)
__tag._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 118, 4)
tag = property(__tag.value, __tag.set, None, None)
# Attribute ind1 uses Python identifier ind1
__ind1 = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ind1'), 'ind1', '__httpwww_loc_govMARC21slim_dataFieldType_ind1', indicatorDataType, required=True)
__ind1._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 119, 4)
__ind1._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 119, 4)
ind1 = property(__ind1.value, __ind1.set, None, None)
# Attribute ind2 uses Python identifier ind2
__ind2 = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ind2'), 'ind2', '__httpwww_loc_govMARC21slim_dataFieldType_ind2', indicatorDataType, required=True)
__ind2._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 120, 4)
__ind2._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 120, 4)
ind2 = property(__ind2.value, __ind2.set, None, None)
_ElementMap.update({
__subfield.name() : __subfield
})
_AttributeMap.update({
__id.name() : __id,
__tag.name() : __tag,
__ind1.name() : __ind1,
__ind2.name() : __ind2
})
Namespace.addCategoryObject('typeBinding', 'dataFieldType', dataFieldType)
# Complex type {http://www.loc.gov/MARC21/slim}subfieldatafieldType with content type SIMPLE
class subfieldatafieldType (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.loc.gov/MARC21/slim}subfieldatafieldType with content type SIMPLE"""
_TypeDefinition = subfieldDataType
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subfieldatafieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 134, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is subfieldDataType
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_subfieldatafieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 137, 8)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 137, 8)
id = property(__id.value, __id.set, None, None)
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__httpwww_loc_govMARC21slim_subfieldatafieldType_code', subfieldcodeDataType, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 138, 8)
__code._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 138, 8)
code = property(__code.value, __code.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__id.name() : __id,
__code.name() : __code
})
Namespace.addCategoryObject('typeBinding', 'subfieldatafieldType', subfieldatafieldType)
record = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'record'), recordType, nillable=pyxb.binding.datatypes.boolean(1), documentation='record is a top level container element for all of the field elements which compose the record', location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 36, 2))
Namespace.addCategoryObject('elementBinding', record.name().localName(), record)
collection = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'collection'), collectionType, nillable=pyxb.binding.datatypes.boolean(1), documentation='collection is a top level container element for 0 or many records', location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 41, 2))
Namespace.addCategoryObject('elementBinding', collection.name().localName(), collection)
collectionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'record'), recordType, nillable=pyxb.binding.datatypes.boolean(1), scope=collectionType, documentation='record is a top level container element for all of the field elements which compose the record', location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 36, 2)))
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 47, 4))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(collectionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'record')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 48, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
collectionType._Automaton = _BuildAutomaton()
recordType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'leader'), leaderFieldType, scope=recordType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 55, 8)))
recordType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'controlfield'), controlFieldType, scope=recordType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 56, 8)))
recordType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'datafield'), dataFieldType, scope=recordType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 57, 8)))
def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 54, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(recordType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'leader')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 55, 8))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(recordType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'controlfield')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 56, 8))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(recordType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'datafield')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 57, 8))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
st_2._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
recordType._Automaton = _BuildAutomaton_()
dataFieldType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'subfield'), subfieldatafieldType, scope=dataFieldType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 115, 6)))
def _BuildAutomaton_2 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_2
del _BuildAutomaton_2
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(dataFieldType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'subfield')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 115, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
dataFieldType._Automaton = _BuildAutomaton_2()<|fim▁end|> | instance = handler.rootObject()
return instance |
<|file_name|>slice-panic-1.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that if a slicing expr[..] fails, the correct cleanups happen.<|fim▁hole|>
use std::thread;
struct Foo;
static mut DTOR_COUNT: int = 0;
impl Drop for Foo {
fn drop(&mut self) { unsafe { DTOR_COUNT += 1; } }
}
fn foo() {
let x: &[_] = &[Foo, Foo];
&x[3..4];
}
fn main() {
let _ = thread::spawn(move|| foo()).join();
unsafe { assert!(DTOR_COUNT == 2); }
}<|fim▁end|> | |
<|file_name|>rhev-nagios-table-host-mem.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Author: Pablo Iranzo Gomez ([email protected])
#
# Description: Script for monitoring host Memory status and VM's rhevm-sdk
# api and produce NAGIOS valid output
#
# Requires rhevm-sdk to work
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import optparse<|fim▁hole|>from ovirtsdk.xml import params
description = """
RHEV-nagios-table-host-mem is a script for querying RHEVM via the API to get host status.
Its goal is to output a table of host/vm status for simple monitoring via external utilities.
"""
# Option parsing
p = optparse.OptionParser("rhev-nagios-table-host-mem.py [arguments]", description=description)
p.add_option('-v', "--verbosity", dest="verbosity", help="Show messages while running", metavar='[0-n]', default=0,
type='int')
p.add_option("--host", dest="host", help="Show messages while running", metavar='host')
p.add_option("-t", "--table", dest="table", help="Input file in CSV format", metavar='table')
(options, args) = p.parse_args()
# MAIN PROGRAM
if not options.host:
print("Host not defined, exiting")
sys.exit(1)
if not options.table:
print("CSV table not defined, exiting")
sys.exit(1)
try:
    f = open(options.table)  # file to process
except IOError:
print("Problem opening the file %s" % options.table)
sys.exit(1)
# NAGIOS PRIOS:
# 0 -> ok
# 1 -> warning
# 2 -> critical
# 3 -> unknown
# By default, return unknown
# TYPE;HOST;STATE;CPU;MEM
# host;rhev01.lab.local;up;16;0.0
for line in f:
if line.split(";")[0] == "host":
if line.split(";")[1] == options.host:
            usage = float(line.split(";")[4])
            retorno = 3
            if usage >= 95:
                retorno = 2
            elif usage >= 90:
                retorno = 1
            else:
                retorno = 0
print(usage)
sys.exit(retorno)<|fim▁end|> | |
<|file_name|>AntlrParser.js<|end_file_name|><|fim▁begin|>"use strict";
var antlr4 = require('antlr4/index');
var LambdaCalculusLexer = require("../antlr/generated/LambdaCalculusLexer");
var LambdaCalculusParser = require("../antlr/generated/LambdaCalculusParser");
var AstCreator = require("./ParseTreeListeningAstCreator").AstCreator;
var Immutable = require('immutable');
module.exports.AntlrParser = function () {
var self = {};
self.parse = function (input) {
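        // ANTLR4 pipeline: input string -> character stream -> lexer -> token stream
        // -> parser -> parse tree; the AstCreator visitor then walks the tree to build
        // the AST that is wrapped in the Immutable result below.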
var chars = new antlr4.InputStream(input);
var lexer = new LambdaCalculusLexer.LambdaCalculusLexer(chars);
var tokens = new antlr4.CommonTokenStream(lexer);
var parser = new LambdaCalculusParser.LambdaCalculusParser(tokens);
parser.buildParseTrees = true;
var tree = parser.expression();
var astCreator = new AstCreator();<|fim▁hole|> // todo - implement error handling
return Immutable.fromJS({
value: result,
status: true
});
};
return self;
}();<|fim▁end|> | var result = tree.accept(astCreator);
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>#![recursion_limit = "128"]
#![allow(dead_code)]
#[macro_use]
mod util;
mod dispatch;
use anyhow::Result;
use monger_core::os::OS_NAMES;
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
#[structopt(about, author)]
enum Options {
/// clear the database files for an installed MongoDB version
Clear {
/// the ID of the MongoDB version whose files should be cleared
#[structopt(name = "ID")]
id: String,
},
/// deletes an installed MongoDB version
Delete {
/// the ID of the MongoDB version to delete
#[structopt(name = "ID")]
id: String,
},
/// manages the default arguments used when starting a mongod
Defaults(Defaults),
/// downloads a MongoDB version from a given URL
Download {
/// the URL to download from
#[structopt(name = "URL")]
url: String,
/// specify a unique identifier for the MongoDB version being downloaded
#[structopt(long)]
id: String,
/// download the MongoDB version even if it already is installed
#[structopt(long, short)]
force: bool,
},
/// downloads a MongoDB version
Get {
/// the MongoDB version to download
#[structopt(name = "VERSION")]
version: String,
/// download the MongoDB version even if it already is installed
#[structopt(long, short)]
force: bool,
/// the OS version to download
#[structopt(long, possible_values(&OS_NAMES))]
os: Option<String>,
/// specify a unique identifier for the MongoDB version being downloaded; if not specified,
        /// it will default to the version string (i.e., 'x.y.z')
#[structopt(long)]
id: Option<String>,
},
/// lists installed MongoDB versions
List,
/// deletes versions of MongoDB where a newer stable version of the same minor version is
/// installed
Prune,
/// run a binary of a downloaded MongoDB version
Run {
/// the ID of the MongoDB version of the binary being run
#[structopt(name = "ID")]
id: String,
/// the MongoDB binary to run
#[structopt(name = "BIN")]
bin: String,
/// arguments for the MongoDB binary being run
#[structopt(name = "BIN_ARGS", last(true))]
bin_args: Vec<String>,
},
/// updates monger to the latest version
SelfUpdate,
/// start an installed mongod
Start {
/// the ID of the mongod version to start
#[structopt(name = "ID")]
id: String,<|fim▁hole|> #[structopt(name = "MONGODB_ARGS", last(true))]
mongod_args: Vec<String>,
},
}
#[derive(Debug, StructOpt)]
enum Defaults {
/// clears the previously set default arguments
Clear,
/// prints the default arguments used when starting a mongod
Get,
/// sets the default arguments used when starting a mongod
Set {
#[structopt(name = "ARGS", last(true))]
args: Vec<String>,
},
}
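// Illustrative invocations (assumed binary name `monger`; not part of this file):
//   monger get 4.2.0
//   monger run 4.2.0 mongod -- --port 27018
//   monger defaults set -- --quiet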
fn main() -> Result<()> {
Options::from_args().dispatch()
}<|fim▁end|> |
/// extra arguments for the mongod being run |
<|file_name|>glob.js<|end_file_name|><|fim▁begin|>// Approach:
//
// 1. Get the minimatch set
// 2. For each pattern in the set, PROCESS(pattern, false)
// 3. Store matches per-set, then uniq them
//
// PROCESS(pattern, inGlobStar)
// Get the first [n] items from pattern that are all strings
// Join these together. This is PREFIX.
// If there is no more remaining, then stat(PREFIX) and
// add to matches if it succeeds. END.
//
// If inGlobStar and PREFIX is symlink and points to dir
// set ENTRIES = []
// else readdir(PREFIX) as ENTRIES
// If fail, END
//
// with ENTRIES
// If pattern[n] is GLOBSTAR
// // handle the case where the globstar match is empty
// // by pruning it out, and testing the resulting pattern
// PROCESS(pattern[0..n] + pattern[n+1 .. $], false)
// // handle other cases.
// for ENTRY in ENTRIES (not dotfiles)
// // attach globstar + tail onto the entry
// // Mark that this entry is a globstar match
// PROCESS(pattern[0..n] + ENTRY + pattern[n .. $], true)
//
// else // not globstar
// for ENTRY in ENTRIES (not dotfiles, unless pattern[n] is dot)
// Test ENTRY against pattern[n]
// If fails, continue
// If passes, PROCESS(pattern[0..n] + item + pattern[n+1 .. $])
//
// Caveat:
// Cache all stats and readdirs results to minimize syscall. Since all
// we ever care about is existence and directory-ness, we can just keep
// `true` for files, and [children,...] for directories, or `false` for
// things that don't exist.
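//
// Illustrative usage of the API exported below (example patterns and options
// only; not part of the original file):
//
//   var glob = require('glob')
//   glob('lib/**/*.js', { nodir: true }, function (er, files) {
//     if (er) throw er
//     console.log(files) // matches, deduped across the minimatch set
//   })
//   var files = glob.sync('lib/**/*.js')
//   glob.hasMagic('lib/*.js') // => true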
module.exports = glob
var rp = require('fs.realpath')
var minimatch = require('minimatch')
var Minimatch = minimatch.Minimatch
var inherits = require('inherits')
var EE = require('events').EventEmitter
var path = require('path')
var assert = require('assert')
var isAbsolute = require('path-is-absolute')
var globSync = require('./sync.js')
var common = require('./common.js')
var setopts = common.setopts
var ownProp = common.ownProp
var inflight = require('inflight')
var util = require('util')
var childrenIgnored = common.childrenIgnored
var isIgnored = common.isIgnored
var once = require('once')
function glob (pattern, options, cb) {
if (typeof options === 'function') cb = options, options = {}
if (!options) options = {}
if (options.sync) {
if (cb)
throw new TypeError('callback provided to sync glob')
return globSync(pattern, options)
}
return new Glob(pattern, options, cb)
}
glob.sync = globSync
var GlobSync = glob.GlobSync = globSync.GlobSync
// old api surface
glob.glob = glob
function extend (origin, add) {
if (add === null || typeof add !== 'object') {
return origin
}
var keys = Object.keys(add)
var i = keys.length
while (i--) {
origin[keys[i]] = add[keys[i]]
}
return origin
}
glob.hasMagic = function (pattern, options_) {
var options = extend({}, options_)
options.noprocess = true
var g = new Glob(pattern, options)
var set = g.minimatch.set
if (!pattern)
return false
if (set.length > 1)
return true
for (var j = 0; j < set[0].length; j++) {
if (typeof set[0][j] !== 'string')
return true
}
return false
}
glob.Glob = Glob
inherits(Glob, EE)
function Glob (pattern, options, cb) {
if (typeof options === 'function') {
cb = options
options = null
}
if (options && options.sync) {
if (cb)
throw new TypeError('callback provided to sync glob')
return new GlobSync(pattern, options)
}
if (!(this instanceof Glob))
return new Glob(pattern, options, cb)
setopts(this, pattern, options)
this._didRealPath = false
// process each pattern in the minimatch set
var n = this.minimatch.set.length
// The matches are stored as {<filename>: true,...} so that
// duplicates are automagically pruned.
// Later, we do an Object.keys() on these.
// Keep them as a list so we can fill in when nonull is set.
this.matches = new Array(n)
if (typeof cb === 'function') {
cb = once(cb)
this.on('error', cb)
this.on('end', function (matches) {
cb(null, matches)
})
}
var self = this
this._processing = 0
this._emitQueue = []
this._processQueue = []
this.paused = false
if (this.noprocess)
return this
if (n === 0)
return done()
var sync = true
for (var i = 0; i < n; i ++) {
this._process(this.minimatch.set[i], i, false, done)
}
sync = false
function done () {
--self._processing
if (self._processing <= 0) {
if (sync) {
process.nextTick(function () {
self._finish()
})
} else {
self._finish()
}
}
}
}
Glob.prototype._finish = function () {
assert(this instanceof Glob)
if (this.aborted)
return
if (this.realpath && !this._didRealpath)
return this._realpath()
common.finish(this)
this.emit('end', this.found)
}
Glob.prototype._realpath = function () {
if (this._didRealpath)
return
this._didRealpath = true
var n = this.matches.length
if (n === 0)
return this._finish()
var self = this
for (var i = 0; i < this.matches.length; i++)
this._realpathSet(i, next)
function next () {
if (--n === 0)
self._finish()
}
}
Glob.prototype._realpathSet = function (index, cb) {
var matchset = this.matches[index]
if (!matchset)
return cb()
var found = Object.keys(matchset)
var self = this
var n = found.length
if (n === 0)
return cb()
var set = this.matches[index] = Object.create(null)
found.forEach(function (p, i) {
// If there's a problem with the stat, then it means that
// one or more of the links in the realpath couldn't be
// resolved. just return the abs value in that case.
p = self._makeAbs(p)
rp.realpath(p, self.realpathCache, function (er, real) {
if (!er)
set[real] = true
else if (er.syscall === 'stat')
set[p] = true
else
self.emit('error', er) // srsly wtf right here
if (--n === 0) {
self.matches[index] = set
cb()
}
})
})
}
Glob.prototype._mark = function (p) {
return common.mark(this, p)
}
Glob.prototype._makeAbs = function (f) {
return common.makeAbs(this, f)
}
Glob.prototype.abort = function () {
this.aborted = true
this.emit('abort')
}
Glob.prototype.pause = function () {
if (!this.paused) {
this.paused = true
this.emit('pause')
}
}
Glob.prototype.resume = function () {
if (this.paused) {
this.emit('resume')
this.paused = false
if (this._emitQueue.length) {
var eq = this._emitQueue.slice(0)
this._emitQueue.length = 0
for (var i = 0; i < eq.length; i ++) {
var e = eq[i]
this._emitMatch(e[0], e[1])
}
}
if (this._processQueue.length) {
var pq = this._processQueue.slice(0)
this._processQueue.length = 0
for (var i = 0; i < pq.length; i ++) {
var p = pq[i]
this._processing--
this._process(p[0], p[1], p[2], p[3])
}
}
}
}
Glob.prototype._process = function (pattern, index, inGlobStar, cb) {
assert(this instanceof Glob)
assert(typeof cb === 'function')
if (this.aborted)
return
this._processing++
if (this.paused) {
this._processQueue.push([pattern, index, inGlobStar, cb])
return
}
//console.error('PROCESS %d', this._processing, pattern)
// Get the first [n] parts of pattern that are all strings.
var n = 0
while (typeof pattern[n] === 'string') {
n ++
}
// now n is the index of the first one that is *not* a string.
// see if there's anything else
var prefix
switch (n) {
// if not, then this is rather simple
case pattern.length:
this._processSimple(pattern.join('/'), index, cb)
return
case 0:
// pattern *starts* with some non-trivial item.
// going to readdir(cwd), but not include the prefix in matches.
prefix = null
break
default:
// pattern has some string bits in the front.
// whatever it starts with, whether that's 'absolute' like /foo/bar,
// or 'relative' like '../baz'
prefix = pattern.slice(0, n).join('/')
break
}
var remain = pattern.slice(n)
// get the list of entries.
var read
if (prefix === null)
read = '.'
else if (isAbsolute(prefix) || isAbsolute(pattern.join('/'))) {
if (!prefix || !isAbsolute(prefix))
prefix = '/' + prefix
read = prefix
} else
read = prefix
var abs = this._makeAbs(read)
//if ignored, skip _processing
if (childrenIgnored(this, read))
return cb()
var isGlobStar = remain[0] === minimatch.GLOBSTAR
if (isGlobStar)
this._processGlobStar(prefix, read, abs, remain, index, inGlobStar, cb)
else
this._processReaddir(prefix, read, abs, remain, index, inGlobStar, cb)
}
Glob.prototype._processReaddir = function (prefix, read, abs, remain, index, inGlobStar, cb) {
var self = this
this._readdir(abs, inGlobStar, function (er, entries) {
return self._processReaddir2(prefix, read, abs, remain, index, inGlobStar, entries, cb)
})
}
Glob.prototype._processReaddir2 = function (prefix, read, abs, remain, index, inGlobStar, entries, cb) {
// if the abs isn't a dir, then nothing can match!
if (!entries)
return cb()
// It will only match dot entries if it starts with a dot, or if
// dot is set. Stuff like @(.foo|.bar) isn't allowed.
var pn = remain[0]
var negate = !!this.minimatch.negate
var rawGlob = pn._glob
var dotOk = this.dot || rawGlob.charAt(0) === '.'
var matchedEntries = []
for (var i = 0; i < entries.length; i++) {
var e = entries[i]
if (e.charAt(0) !== '.' || dotOk) {
var m
if (negate && !prefix) {
m = !e.match(pn)
} else {
m = e.match(pn)
}
if (m)
matchedEntries.push(e)
}
}
//console.error('prd2', prefix, entries, remain[0]._glob, matchedEntries)
var len = matchedEntries.length
// If there are no matched entries, then nothing matches.
if (len === 0)
return cb()
// if this is the last remaining pattern bit, then no need for
// an additional stat *unless* the user has specified mark or
// stat explicitly. We know they exist, since readdir returned
// them.
if (remain.length === 1 && !this.mark && !this.stat) {
if (!this.matches[index])
this.matches[index] = Object.create(null)
for (var i = 0; i < len; i ++) {
var e = matchedEntries[i]
if (prefix) {
if (prefix !== '/')
e = prefix + '/' + e
else
e = prefix + e
}
if (e.charAt(0) === '/' && !this.nomount) {
e = path.join(this.root, e)
}
this._emitMatch(index, e)
}
// This was the last one, and no stats were needed
return cb()
}
// now test all matched entries as stand-ins for that part
// of the pattern.
remain.shift()
for (var i = 0; i < len; i ++) {
var e = matchedEntries[i]
var newPattern
if (prefix) {
if (prefix !== '/')
e = prefix + '/' + e
else
e = prefix + e
}
this._process([e].concat(remain), index, inGlobStar, cb)
}
cb()
}
Glob.prototype._emitMatch = function (index, e) {
if (this.aborted)
return
if (isIgnored(this, e))
return
if (this.paused) {
this._emitQueue.push([index, e])
return
}
var abs = isAbsolute(e) ? e : this._makeAbs(e)
if (this.mark)
e = this._mark(e)
if (this.absolute)
e = abs
if (this.matches[index][e])
return
if (this.nodir) {
var c = this.cache[abs]
if (c === 'DIR' || Array.isArray(c))
return
}
this.matches[index][e] = true
var st = this.statCache[abs]
if (st)
this.emit('stat', e, st)
this.emit('match', e)
}
Glob.prototype._readdirInGlobStar = function (abs, cb) {
if (this.aborted)
return
// follow all symlinked directories forever
// just proceed as if this is a non-globstar situation
if (this.follow)
return this._readdir(abs, false, cb)
var lstatkey = 'lstat\0' + abs
var self = this
var lstatcb = inflight(lstatkey, lstatcb_)
if (lstatcb)
self.fs.lstat(abs, lstatcb)
function lstatcb_ (er, lstat) {
if (er && er.code === 'ENOENT')
return cb()
var isSym = lstat && lstat.isSymbolicLink()
self.symlinks[abs] = isSym
// If it's not a symlink or a dir, then it's definitely a regular file.
// don't bother doing a readdir in that case.
if (!isSym && lstat && !lstat.isDirectory()) {
self.cache[abs] = 'FILE'
cb()
} else
self._readdir(abs, false, cb)
}
}
Glob.prototype._readdir = function (abs, inGlobStar, cb) {
if (this.aborted)
return
cb = inflight('readdir\0'+abs+'\0'+inGlobStar, cb)
if (!cb)
return
//console.error('RD %j %j', +inGlobStar, abs)
if (inGlobStar && !ownProp(this.symlinks, abs))
return this._readdirInGlobStar(abs, cb)
if (ownProp(this.cache, abs)) {
var c = this.cache[abs]
if (!c || c === 'FILE')
return cb()
if (Array.isArray(c))
return cb(null, c)
}
var self = this
self.fs.readdir(abs, readdirCb(this, abs, cb))
}
function readdirCb (self, abs, cb) {
return function (er, entries) {
if (er)
self._readdirError(abs, er, cb)
else
self._readdirEntries(abs, entries, cb)
}
}
Glob.prototype._readdirEntries = function (abs, entries, cb) {
if (this.aborted)
return
// if we haven't asked to stat everything, then just
// assume that everything in there exists, so we can avoid
// having to stat it a second time.
if (!this.mark && !this.stat) {
for (var i = 0; i < entries.length; i ++) {
var e = entries[i]
if (abs === '/')
e = abs + e
else
e = abs + '/' + e
this.cache[e] = true
}
}
this.cache[abs] = entries
return cb(null, entries)
}
Glob.prototype._readdirError = function (f, er, cb) {
if (this.aborted)
return
// handle errors, and cache the information
switch (er.code) {
case 'ENOTSUP': // https://github.com/isaacs/node-glob/issues/205
case 'ENOTDIR': // totally normal. means it *does* exist.
var abs = this._makeAbs(f)
this.cache[abs] = 'FILE'
if (abs === this.cwdAbs) {
var error = new Error(er.code + ' invalid cwd ' + this.cwd)
error.path = this.cwd
error.code = er.code
this.emit('error', error)
this.abort()
}
break<|fim▁hole|> case 'ELOOP':
case 'ENAMETOOLONG':
case 'UNKNOWN':
this.cache[this._makeAbs(f)] = false
break
default: // some unusual error. Treat as failure.
this.cache[this._makeAbs(f)] = false
if (this.strict) {
this.emit('error', er)
// If the error is handled, then we abort
// if not, we threw out of here
this.abort()
}
if (!this.silent)
console.error('glob error', er)
break
}
return cb()
}
Glob.prototype._processGlobStar = function (prefix, read, abs, remain, index, inGlobStar, cb) {
var self = this
this._readdir(abs, inGlobStar, function (er, entries) {
self._processGlobStar2(prefix, read, abs, remain, index, inGlobStar, entries, cb)
})
}
Glob.prototype._processGlobStar2 = function (prefix, read, abs, remain, index, inGlobStar, entries, cb) {
//console.error('pgs2', prefix, remain[0], entries)
// no entries means not a dir, so it can never have matches
// foo.txt/** doesn't match foo.txt
if (!entries)
return cb()
// test without the globstar, and with every child both below
// and replacing the globstar.
var remainWithoutGlobStar = remain.slice(1)
var gspref = prefix ? [ prefix ] : []
var noGlobStar = gspref.concat(remainWithoutGlobStar)
// the noGlobStar pattern exits the inGlobStar state
this._process(noGlobStar, index, false, cb)
var isSym = this.symlinks[abs]
var len = entries.length
// If it's a symlink, and we're in a globstar, then stop
if (isSym && inGlobStar)
return cb()
for (var i = 0; i < len; i++) {
var e = entries[i]
if (e.charAt(0) === '.' && !this.dot)
continue
// these two cases enter the inGlobStar state
var instead = gspref.concat(entries[i], remainWithoutGlobStar)
this._process(instead, index, true, cb)
var below = gspref.concat(entries[i], remain)
this._process(below, index, true, cb)
}
cb()
}
Glob.prototype._processSimple = function (prefix, index, cb) {
// XXX review this. Shouldn't it be doing the mounting etc
// before doing stat? kinda weird?
var self = this
this._stat(prefix, function (er, exists) {
self._processSimple2(prefix, index, er, exists, cb)
})
}
Glob.prototype._processSimple2 = function (prefix, index, er, exists, cb) {
//console.error('ps2', prefix, exists)
if (!this.matches[index])
this.matches[index] = Object.create(null)
// If it doesn't exist, then just mark the lack of results
if (!exists)
return cb()
if (prefix && isAbsolute(prefix) && !this.nomount) {
var trail = /[\/\\]$/.test(prefix)
if (prefix.charAt(0) === '/') {
prefix = path.join(this.root, prefix)
} else {
prefix = path.resolve(this.root, prefix)
if (trail)
prefix += '/'
}
}
if (process.platform === 'win32')
prefix = prefix.replace(/\\/g, '/')
// Mark this as a match
this._emitMatch(index, prefix)
cb()
}
// Returns either 'DIR', 'FILE', or false
Glob.prototype._stat = function (f, cb) {
var abs = this._makeAbs(f)
var needDir = f.slice(-1) === '/'
if (f.length > this.maxLength)
return cb()
if (!this.stat && ownProp(this.cache, abs)) {
var c = this.cache[abs]
if (Array.isArray(c))
c = 'DIR'
// It exists, but maybe not how we need it
if (!needDir || c === 'DIR')
return cb(null, c)
if (needDir && c === 'FILE')
return cb()
// otherwise we have to stat, because maybe c=true
// if we know it exists, but not what it is.
}
var exists
var stat = this.statCache[abs]
if (stat !== undefined) {
if (stat === false)
return cb(null, stat)
else {
var type = stat.isDirectory() ? 'DIR' : 'FILE'
if (needDir && type === 'FILE')
return cb()
else
return cb(null, type, stat)
}
}
var self = this
var statcb = inflight('stat\0' + abs, lstatcb_)
if (statcb)
self.fs.lstat(abs, statcb)
function lstatcb_ (er, lstat) {
if (lstat && lstat.isSymbolicLink()) {
// If it's a symlink, then treat it as the target, unless
// the target does not exist, then treat it as a file.
return self.fs.stat(abs, function (er, stat) {
if (er)
self._stat2(f, abs, null, lstat, cb)
else
self._stat2(f, abs, er, stat, cb)
})
} else {
self._stat2(f, abs, er, lstat, cb)
}
}
}
Glob.prototype._stat2 = function (f, abs, er, stat, cb) {
if (er && (er.code === 'ENOENT' || er.code === 'ENOTDIR')) {
this.statCache[abs] = false
return cb()
}
var needDir = f.slice(-1) === '/'
this.statCache[abs] = stat
if (abs.slice(-1) === '/' && stat && !stat.isDirectory())
return cb(null, false, stat)
var c = true
if (stat)
c = stat.isDirectory() ? 'DIR' : 'FILE'
this.cache[abs] = this.cache[abs] || c
if (needDir && c === 'FILE')
return cb()
return cb(null, c, stat)
}<|fim▁end|> |
case 'ENOENT': // not terribly unusual |
<|file_name|>_jcollection.py<|end_file_name|><|fim▁begin|>#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import collections
from . import _jclass
class _WrappedIterator(object):
"""
Wraps a Java iterator to respect the Python 3 iterator API
"""
def __init__(self, iterator):
self.iterator = iterator
def __iter__(self):
return self.iterator
def __next__(self):
return next(self.iterator)
# Compatibility name
next = __next__
def _initialize():
_jclass.registerClassCustomizer(CollectionCustomizer())
_jclass.registerClassCustomizer(ListCustomizer())
_jclass.registerClassCustomizer(MapCustomizer())
_jclass.registerClassCustomizer(IteratorCustomizer())
_jclass.registerClassCustomizer(EnumerationCustomizer())
def isPythonSequence(v):
if isinstance(v, collections.Sequence):
if not hasattr(v.__class__, '__metaclass__') \
or v.__class__.__metaclass__ is _jclass._JavaClass:
return True
return False
def _colLength(self):
return self.size()
def _colIter(self):
return _WrappedIterator(self.iterator())
def _colDelItem(self, i):
return self.remove(i)
def _colAddAll(self, v):
if isPythonSequence(v):
r = False
for i in v:
r = self.add(i) or r
return r
else:
return self._addAll(v)
def _colRemoveAll(self, v):
if isPythonSequence(v):
r = False
for i in v:
r = self.remove(i) or r
return r
else:
return self._removeAll(v)
def _colRetainAll(self, v):
if isPythonSequence(v):
r = _jclass.JClass("java.util.ArrayList")(len(v))
for i in v:
r.add(i)
else:
r = v
return self._retainAll(r)
class CollectionCustomizer(object):
_METHODS = {
'__len__': _colLength,
'__iter__': _colIter,
'__delitem__': _colDelItem,
}
def canCustomize(self, name, jc):
if name == 'java.util.Collection':
return True
return jc.isSubclass('java.util.Collection')
def customize(self, name, jc, bases, members):
if name == 'java.util.Collection':
members.update(CollectionCustomizer._METHODS)
else:
# AddAll is handled by List
if (not jc.isSubclass("java.util.List")) and 'addAll' in members:
members['_addAll'] = members['addAll']
members['addAll'] = _colAddAll
if 'removeAll' in members:
members['_removeAll'] = members['removeAll']
members['removeAll'] = _colRemoveAll
if 'retainAll' in members:
members['_retainAll'] = members['retainAll']
members['retainAll'] = _colRetainAll
def _listGetItem(self, ndx):
if isinstance(ndx, slice):
start = ndx.start
stop = ndx.stop
if start < 0:
start = self.size() + start
if stop < 0:
stop = self.size() + stop
return self.subList(start, stop)
else:
if ndx < 0:
ndx = self.size() + ndx
return self.get(ndx)
def _listSetItem(self, ndx, v):
if isinstance(ndx, slice):
start = ndx.start
stop = ndx.stop
if start < 0:
start = self.size() + start
if stop < 0:
stop = self.size() + stop
for i in range(start, stop):
self.remove(start)
if isinstance(v, collections.Sequence):
ndx = start
for i in v:
self.add(ndx, i)
ndx += 1
else:
if ndx < 0:
ndx = self.size() + ndx
self.set(ndx, v)
def _listAddAll(self, v, v2=None):
if isPythonSequence(v):
r = False
if v2 is not None: # assume form (int, values)
for i in range(len(v2)):
r = r or self.add(v + i, v2[i])
else:
for i in v:
r = self.add(i) or r
return r
else:
return self._addAll(v)
class ListCustomizer(object):
_METHODS = {
'__setitem__': _listSetItem,
'__getitem__': _listGetItem,
}
def canCustomize(self, name, jc):
if name == 'java.util.List':
return True
return jc.isSubclass('java.util.List')
def customize(self, name, jc, bases, members):
if name == 'java.util.List':
members.update(ListCustomizer._METHODS)
else:
if 'addAll' in members:
members['_addAll'] = members['addAll']
members['addAll'] = _listAddAll
def isPythonMapping(v):
if isinstance(v, collections.Mapping):
if not hasattr(v.__class__, '__metaclass__') or \
v.__class__.__metaclass__ is _jclass._JavaClass:
return True
return False
def _mapLength(self):
return self.size()
def _mapIter(self):
return _WrappedIterator(self.keySet().iterator())
def _mapDelItem(self, i):
return self.remove(i)
def _mapGetItem(self, ndx):
return self.get(ndx)
def _mapSetItem(self, ndx, v):
self.put(ndx, v)
def _mapPutAll(self, v):
if isPythonMapping(v):
for i in v:
self.put(i, v[i])
else:
# do the regular method ...
self._putAll(v)
class MapCustomizer(object):
_METHODS = {
'__len__': _mapLength,
'__iter__': _mapIter,
'__delitem__': _mapDelItem,
'__getitem__': _mapGetItem,
'__setitem__': _mapSetItem,
}
def canCustomize(self, name, jc):
if name == 'java.util.Map':
return True
return jc.isSubclass('java.util.Map')
def customize(self, name, jc, bases, members):
if name == 'java.util.Map':
members.update(MapCustomizer._METHODS)
else:
if "putAll" in members:
members["_putAll"] = members["putAll"]
members["putAll"] = _mapPutAll
def _iterCustomNext(self):
if self.hasNext():
return self._next()
raise StopIteration
def _iterIteratorNext(self):
if self.hasNext():
return next(self)
raise StopIteration
<|fim▁hole|>class IteratorCustomizer(object):
_METHODS = {
'__iter__': _iterIter,
'__next__': _iterCustomNext,
}
def canCustomize(self, name, jc):
if name == 'java.util.Iterator':
return True
return jc.isSubclass('java.util.Iterator')
def customize(self, name, jc, bases, members):
if name == 'java.util.Iterator':
members.update(IteratorCustomizer._METHODS)
elif jc.isSubclass('java.util.Iterator'):
__next__ = 'next' if 'next' in members else '__next__'
members['_next'] = members[__next__]
members[__next__] = _iterCustomNext
def _enumNext(self):
if self.hasMoreElements():
return self.nextElement()
raise StopIteration
def _enumIter(self):
return self
class EnumerationCustomizer(object):
_METHODS = {
'next': _enumNext,
'__next__': _enumNext,
'__iter__': _enumIter,
}
def canCustomize(self, name, jc):
return name == 'java.util.Enumeration'
def customize(self, name, jc, bases, members):
members.update(EnumerationCustomizer._METHODS)<|fim▁end|> | def _iterIter(self):
return self
|
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>import * as Model from 'cerebral-model-baobab'
import { githubLibraryGet } from './signals/githubLibraryGet'
import { libraryGithubPath } from './signals/libraryGithubPath'
import { libraryGithubToken } from './signals/libraryGithubToken'
import { makeId } from '../../modules/Factory'
import { name } from './signals/name'
export interface UserSignalsType {
githubLibraryGet () // try to fetch library from github
libraryGithubPath ( input: { value: string } )
libraryGithubToken ( input: { value: string } )
name ( input: { value: string } )
}
const defaultUser =
{ _id: 'gaspard' //makeId ()
, type: 'user'
, name: 'New user'
// selected elements
, projectId: null
, blockId: null
, sceneId: null
}
const CurrentUser = Model.monkey
( { cursors:
{ userById: [ 'data', 'user' ]
, id: [ '$auth', 'userId' ]
}
, get ( data ) {
const userById = data.userById || {}
return userById [ data.id || 'gaspard' ]
|| defaultUser
}
}
)
<|fim▁hole|> module.addState
( CurrentUser
)
module.addSignals
( { name
, githubLibraryGet
, libraryGithubPath
, libraryGithubToken
}
)
return {} // meta information
}
}<|fim▁end|> | export const User =
( options = {}) => {
return (module, controller) => { |
<|file_name|>uber_API.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import json
import dateutil.parser<|fim▁hole|>import numpy as np
import calendar
import itertools
from flask import Flask, request, Response, render_template, redirect, url_for
import Uber
app = Flask(__name__)
'''
The index page has links to the from_file API and the from_stream API.
'''
@app.route('/')
def index():
return render_template('index.html', links={'from_file':url_for('from_file', data_file='uber_demand_prediction_challenge.json'), 'from_stream':url_for('from_stream')})
'''
The from_file API. Accepts a get parameter 'data_file' that points at a data file
containing the login data.
'''
@app.route('/from_file', methods=['GET'])
def from_file():
if request.method == 'GET':
data_file = request.args.get('data_file', '')
dp = Uber.DemandPredictor()
f = open(data_file,'r')
logins = json.loads(f.read())
f.close()
logins_np = np.array([dateutil.parser.parse(x) for x in logins], dtype=datetime.datetime)
for login in logins_np:
dp.addLogin(login)
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
forecast = []
start_date = datetime.datetime(2012, 5, 1, hour = 0, minute = 0, second = 0)
end_date = datetime.datetime(2012, 5, 15, hour = 23, minute = 59, second = 59)
current_date = datetime.datetime(1972, 11, 16, hour = 0, minute = 0, second = 0)
day_index = -1
for single_date in Uber.daterange(start_date, end_date, increment='hours'):
if single_date.date() != current_date.date():
forecast.append(
{
'display_date': '%s, %s %i'%(days[single_date.weekday()], calendar.month_name[single_date.month], single_date.day),
'forecasts': [dp.forecast(single_date.weekday(), single_date.hour)]
}
)
current_date = single_date
day_index += 1
else:
forecast[day_index]['forecasts'].append(dp.forecast(single_date.weekday(), single_date.hour));
return render_template('from_file.html', forecast=json.dumps(forecast))
'''
The from_stream API.
'''
@app.route('/from_stream')
def from_stream():
dp = Uber.DemandPredictor()
'''
This is a fake stream of data. It loops over the provided JSON file.
'''
def login_stream(logins):
for login in itertools.cycle(logins):
parsed_login = dateutil.parser.parse(login)
dp.addLogin(parsed_login)
day = parsed_login.weekday()
hour = parsed_login.hour
forecast = dp.forecast(day, hour)
ret = {'day':day, 'hour':hour, 'forecast':forecast}
yield "data: %s\n\n" % (json.dumps(ret))
data_file = 'uber_demand_prediction_challenge.json'
f = open(data_file,'r')
logins = json.loads(f.read())
f.close()
if request.headers.get('accept') == 'text/event-stream':
return Response(login_stream(logins), content_type='text/event-stream')
return redirect(url_for('static', filename='from_stream.html'))
if __name__ == '__main__':
app.run(debug=True, threaded=True)<|fim▁end|> | import datetime |
<|file_name|>config.go<|end_file_name|><|fim▁begin|>// +build integration,perftest
package main
import (
"flag"
"fmt"
"net/http"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
type Config struct {
Bucket string
Filename string
Size int64
TempDir string
LogVerbose bool
SDK SDKConfig
Client ClientConfig
}
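// Illustrative flag usage (flag names are registered below with an empty
// prefix assumed; the test invocation itself is an assumption, not part of
// this file):
//   go test -tags "integration,perftest" -bucket my-bucket -size 10485760 -sdk.part-size 5242880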
func (c *Config) SetupFlags(prefix string, flagset *flag.FlagSet) {<|fim▁hole|> "The S3 bucket `name` to upload the object to.")
flagset.StringVar(&c.Filename, "file", "",
"The `path` of the local file to upload.")
flagset.Int64Var(&c.Size, "size", 0,
"The S3 object size in bytes to upload")
flagset.StringVar(&c.TempDir, "temp", os.TempDir(), "location to create temporary files")
flagset.BoolVar(&c.LogVerbose, "verbose", false,
"The output log will include verbose request information")
c.SDK.SetupFlags(prefix, flagset)
c.Client.SetupFlags(prefix, flagset)
}
func (c *Config) Validate() error {
var errs Errors
if len(c.Bucket) == 0 || (c.Size <= 0 && c.Filename == "") {
errs = append(errs, fmt.Errorf("bucket and filename/size are required"))
}
if err := c.SDK.Validate(); err != nil {
errs = append(errs, err)
}
if err := c.Client.Validate(); err != nil {
errs = append(errs, err)
}
if len(errs) != 0 {
return errs
}
return nil
}
type SDKConfig struct {
PartSize int64
Concurrency int
WithUnsignedPayload bool
WithContentMD5 bool
ExpectContinue bool
BufferProvider s3manager.ReadSeekerWriteToProvider
}
func (c *SDKConfig) SetupFlags(prefix string, flagset *flag.FlagSet) {
prefix += "sdk."
flagset.Int64Var(&c.PartSize, prefix+"part-size", s3manager.DefaultUploadPartSize,
"Specifies the `size` of parts of the object to upload.")
flagset.IntVar(&c.Concurrency, prefix+"concurrency", s3manager.DefaultUploadConcurrency,
"Specifies the number of parts to upload `at once`.")
flagset.BoolVar(&c.WithUnsignedPayload, prefix+"unsigned", false,
"Specifies if the SDK will use UNSIGNED_PAYLOAD for part SHA256 in request signature.")
flagset.BoolVar(&c.WithContentMD5, prefix+"content-md5", true,
"Specifies if the SDK should compute the content md5 header for S3 uploads.")
flagset.BoolVar(&c.ExpectContinue, prefix+"100-continue", true,
"Specifies if the SDK requests will wait for the 100 continue response before sending request payload.")
}
func (c *SDKConfig) Validate() error {
return nil
}
type ClientConfig struct {
KeepAlive bool
Timeouts Timeouts
MaxIdleConns int
MaxIdleConnsPerHost int
}
func (c *ClientConfig) SetupFlags(prefix string, flagset *flag.FlagSet) {
prefix += "client."
flagset.BoolVar(&c.KeepAlive, prefix+"http-keep-alive", true,
"Specifies if HTTP keep alive is enabled.")
defTR := http.DefaultTransport.(*http.Transport)
flagset.IntVar(&c.MaxIdleConns, prefix+"idle-conns", defTR.MaxIdleConns,
"Specifies max idle connection pool size.")
flagset.IntVar(&c.MaxIdleConnsPerHost, prefix+"idle-conns-host", http.DefaultMaxIdleConnsPerHost,
"Specifies max idle connection pool per host, will be truncated by idle-conns.")
c.Timeouts.SetupFlags(prefix, flagset)
}
func (c *ClientConfig) Validate() error {
var errs Errors
if err := c.Timeouts.Validate(); err != nil {
errs = append(errs, err)
}
if len(errs) != 0 {
return errs
}
return nil
}
type Timeouts struct {
Connect time.Duration
TLSHandshake time.Duration
ExpectContinue time.Duration
ResponseHeader time.Duration
}
func (c *Timeouts) SetupFlags(prefix string, flagset *flag.FlagSet) {
prefix += "timeout."
flagset.DurationVar(&c.Connect, prefix+"connect", 30*time.Second,
"The `timeout` connecting to the remote host.")
defTR := http.DefaultTransport.(*http.Transport)
flagset.DurationVar(&c.TLSHandshake, prefix+"tls", defTR.TLSHandshakeTimeout,
"The `timeout` waiting for the TLS handshake to complete.")
flagset.DurationVar(&c.ExpectContinue, prefix+"expect-continue", defTR.ExpectContinueTimeout,
"The `timeout` waiting for the TLS handshake to complete.")
flagset.DurationVar(&c.ResponseHeader, prefix+"response-header", defTR.ResponseHeaderTimeout,
"The `timeout` waiting for the TLS handshake to complete.")
}
func (c *Timeouts) Validate() error {
return nil
}
type Errors []error
func (es Errors) Error() string {
var buf strings.Builder
for _, e := range es {
buf.WriteString(e.Error())
}
return buf.String()
}<|fim▁end|> | flagset.StringVar(&c.Bucket, "bucket", "", |
<|file_name|>udp_servcer.rs<|end_file_name|><|fim▁begin|>use std::env::args;
use std::net::UdpSocket;
use std::str::from_utf8;
fn main() -> std::io::Result<()> {
let arguments: Vec<String> = args().collect();
let addr = &arguments[1];<|fim▁hole|> let socket = UdpSocket::bind(addr)?;
let mut buf = [0; 100];
let (amt, src) = socket.recv_from(&mut buf)?;
println!("{}:{:?}:{}", amt, src, from_utf8(&buf).unwrap());
Ok(())
}<|fim▁end|> | |
<|file_name|>fuzz_one.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Helper script to execute a single-processed fuzzing session.
Creates fuzz tests in workdir/output/dir-<dir number>/fuzz-XXX.js.
Expects the <dir number> as single parameter.
"""
import os
import subprocess
import sys
import time
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
APP_DIR = os.path.join(BASE_PATH, 'workdir', 'app_dir')<|fim▁hole|>FUZZ_EXE = os.path.join(BASE_PATH, 'workdir', 'fuzzer', 'ochang_js_fuzzer')
INPUT_DIR = os.path.join(BASE_PATH, 'workdir', 'input')
TEST_CASES = os.path.join(BASE_PATH, 'workdir', 'output')
COUNT = 64
FUZZ = ('FUZZ_MODE=foozzie APP_NAME=d8 APP_DIR=%s %s -o %%s -n %s -i %s > %%s'
% (APP_DIR, FUZZ_EXE, COUNT, INPUT_DIR))
assert(len(sys.argv) > 1)
dir_number = int(sys.argv[1])
assert(dir_number >= 0)
path = os.path.join(TEST_CASES, 'dir-%d' % dir_number)
assert not os.path.exists(path), 'Need fresh workdir for fuzzing'
os.makedirs(path)
start = time.time()
subprocess.check_call(
FUZZ % (path, os.path.join(path, 'out.log')), shell=True)
duration = int(time.time() - start)
with open(os.path.join(path, 'duration.log'), 'w') as f:
f.write(str(duration))<|fim▁end|> | |
<|file_name|>formfields.py<|end_file_name|><|fim▁begin|># coding: utf-8
# Copyright (c) 2012, SciELO <[email protected]>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
class ISSNField(forms.CharField):
default_error_messages = {
'invalid': _('Enter a valid ISSN.')
}
<|fim▁hole|>
        if value != u'' and value is not None:
result = re.match(self.regex, value)
if result is None:
raise forms.ValidationError(self.error_messages['invalid'])
return value<|fim▁end|> | regex = r'[0-9]{4}-[0-9]{3}[0-9X]{1}$'
def clean(self, value): |
<|file_name|>Tabs.test.js<|end_file_name|><|fim▁begin|>import React from 'react';
import { shallow, mount } from 'enzyme';
import Tabs from '../components/layout/Tabs';<|fim▁hole|> { label: 'Přihlášení', render: () => {} },
{ label: 'Registrace', render: () => {} }
];
it('renders without crashing', () => {
expect(
shallow(<Tabs tabs={tabs} />).length
).toEqual(1);
});
it('contains exactly two tabs', () => {
const wrapper = mount(<Tabs tabs={tabs} />);
expect(wrapper.render().find('li').length).toEqual(2);
});
it('contains correct labels in tabs', () => {
const wrapper = mount(<Tabs tabs={tabs} />);
expect(wrapper.find('li').at(0).text()).toEqual(tabs[0].label);
expect(wrapper.find('li').at(1).text()).toEqual(tabs[1].label);
});
});<|fim▁end|> |
describe('Tabs render with different props', () => {
const tabs = [ |
<|file_name|>diff_xml_files.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getopt, glob, os, sys
def main(argv):
f1 = ""
f2 = ""
# Get the base folder
try:
opts, args = getopt.getopt(argv, "h", ["f1=", "f2="])
except getopt.GetoptError:
        print 'The file options for diff_xml_files.py were not correctly specified.'
        print 'To see a full list of options try:'
        print ' $ python diff_xml_files.py -h'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'Options:'
            print ' --f1, --f2 The two XML list files to compare.'
sys.exit()
elif opt in ('--f1'):
# check if file exists.
if os.path.exists(arg):
f1 = arg
else:
print 'Error: Argument must be a file name for --f1.'
sys.exit()
elif opt in ('--f2'):
# check if file exists.
if os.path.exists(arg):
f2 = arg
else:
print 'Error: Argument must be a file name for --f2.'
sys.exit()
# Required fields to run the script.
if f1 == "" or not os.path.exists(f1):
print 'Error: The file path option must be supplied: --f1.'
sys.exit()
if f2 == "" or not os.path.exists(f2):
print 'Error: The file path option must be supplied: --f2.'<|fim▁hole|> found_in_both = []
with open(f1) as f:
content_f1 = f.readlines()
set_f1 = set(content_f1)
with open(f2) as f:
content_f2 = f.readlines()
set_f2 = set(content_f2)
missing_in_f1 = set_f2.difference(set_f1)
missing_in_f2 = set_f1.difference(set_f2)
found_in_both = set_f1.intersection(set_f2)
print ""
print "Missing files in " + f1
for f1_name in missing_in_f1:
print " + " + f1_name.strip()
print ""
print "Missing files in " + f2
for f2_name in missing_in_f2:
print " + " + f2_name.strip()
offset = 40
print ""
print "XML Summary"
print (" - Found in both:").ljust(offset) + str(len(found_in_both))
print (" - " + f1 + " diff set vs list:").ljust(offset) + str(len(content_f1) - len(set_f1))
print (" - " + f2 + " diff set vs list:").ljust(offset) + str(len(content_f2) - len(set_f2))
print (" - " + f1 + " missing:").ljust(offset) + str(len(missing_in_f1))
print (" - " + f2 + " missing:").ljust(offset) + str(len(missing_in_f2))
if __name__ == "__main__":
main(sys.argv[1:])<|fim▁end|> | sys.exit()
missing_in_f1 = []
missing_in_f2 = [] |
<|file_name|>channel.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from wtforms import validators
from jinja2 import Markup
from studio.core.engines import db
from riitc.models import NaviModel, ChannelModel
from .base import BaseView
from .forms import CKTextAreaField
class Navi(BaseView):
column_labels = {'name': '名称', 'channels': '频道列表'}
column_list = ['id', 'name', 'channels', 'date_created']
def _list_channels(self, context, model, name):
lis = ''
for channel in model.channels:
lis += '<li>%s</li>' % (channel)<|fim▁hole|>
column_formatters = {
'channels': _list_channels,
}
def __init__(self, **kwargs):
super(Navi, self).__init__(NaviModel, db.session, **kwargs)
def create_form(self, obj=None):
form = super(Navi, self).create_form()
delattr(form, 'date_created')
return form
def edit_form(self, obj=None):
form = super(Navi, self).edit_form(obj=obj)
delattr(form, 'date_created')
return form
class Channel(BaseView):
create_template = 'panel/channel_edit.html'
edit_template = 'panel/channel_edit.html'
column_labels = {'name': '名称',
'parent': '主分类(本身为主分类,不填写)',
'summary': '简介',
'date_created': '创建时间'}
column_searchable_list = ['name', ]
column_default_sort = ('date_created', True)
form_extra_fields = {
'summary': CKTextAreaField('简介',
validators=[validators.Required()]),
}
def __init__(self, **kwargs):
super(Channel, self).__init__(ChannelModel, db.session, **kwargs)
def create_form(self, obj=None):
form = super(Channel, self).create_form()
delattr(form, 'articles')
delattr(form, 'channels')
delattr(form, 'all_articles')
delattr(form, 'date_created')
return form
def edit_form(self, obj=None):
form = super(Channel, self).edit_form(obj=obj)
delattr(form, 'articles')
delattr(form, 'channels')
delattr(form, 'all_articles')
delattr(form, 'date_created')
return form<|fim▁end|> | return Markup('<ol>' + lis + '</ol>') |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>import calendar
import collections
try:
from collections.abc import Iterable
except:
from collections import Iterable
from time import strptime
from six import string_types
from lxml import etree
from itertools import chain
def remove_namespace(tree):
"""
Strip namespace from parsed XML
"""
for node in tree.iter():<|fim▁hole|> if has_namespace:
node.tag = node.tag.split("}", 1)[1]
def read_xml(path, nxml=False):
"""
Parse tree from given XML path
"""
try:
tree = etree.parse(path)
if ".nxml" in path or nxml:
remove_namespace(tree) # strip namespace when reading an XML file
except:
try:
tree = etree.fromstring(path)
except Exception:
print(
"Error: it was not able to read a path, a file-like object, or a string as an XML"
)
raise
return tree
def stringify_children(node):
"""
Filters and removes possible Nones in texts and tails
ref: http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml
"""
parts = (
[node.text]
+ list(chain(*([c.text, c.tail] for c in node.getchildren())))
+ [node.tail]
)
return "".join(filter(None, parts))
def stringify_affiliation(node):
"""
Filters and removes possible Nones in texts and tails
ref: http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml
"""
parts = (
[node.text]
+ list(
chain(
*(
[c.text if (c.tag != "label" and c.tag != "sup") else "", c.tail]
for c in node.getchildren()
)
)
)
+ [node.tail]
)
return " ".join(filter(None, parts))
def stringify_affiliation_rec(node):
"""
Flatten and join list to string
ref: http://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists-in-python
"""
parts = _recur_children(node)
parts_flatten = list(_flatten(parts))
return " ".join(parts_flatten).strip()
def _flatten(l):
"""
Flatten list into one dimensional
"""
for el in l:
if isinstance(el, Iterable) and not isinstance(el, string_types):
for sub in _flatten(el):
yield sub
else:
yield el
def _recur_children(node):
"""
Recursive through node to when it has multiple children
"""
if len(node.getchildren()) == 0:
parts = (
([node.text or ""] + [node.tail or ""])
if (node.tag != "label" and node.tag != "sup")
else ([node.tail or ""])
)
return parts
else:
parts = (
[node.text or ""]
+ [_recur_children(c) for c in node.getchildren()]
+ [node.tail or ""]
)
return parts
def month_or_day_formater(month_or_day):
"""
Parameters
----------
month_or_day: str or int
must be one of the following:
(i) month: a three letter month abbreviation, e.g., 'Jan'.
(ii) day: an integer.
Returns
-------
numeric: str
a month of the form 'MM' or a day of the form 'DD'.
Note: returns None if:
(a) the input could not be mapped to a known month abbreviation OR
(b) the input was not an integer (i.e., a day).
"""
if month_or_day.replace(".", "") in filter(None, calendar.month_abbr):
to_format = strptime(month_or_day.replace(".", ""), "%b").tm_mon
elif month_or_day.strip().isdigit() and "." not in str(month_or_day):
to_format = int(month_or_day.strip())
else:
return None
return ("0" if to_format < 10 else "") + str(to_format)
def pretty_print(node):
"""
Pretty print a given lxml node
"""
print(etree.tostring(node, pretty_print=True).decode("utf-8"))<|fim▁end|> | try:
has_namespace = node.tag.startswith("{")
except AttributeError:
continue # node.tag is not a string (node is a comment or similar) |
<|file_name|>ags-simplemarkersymbol.ts<|end_file_name|><|fim▁begin|>import ol = require("openlayers");
import Serializer = require("./serializer");
import { StyleConverter } from "ol3-symbolizer/ol3-symbolizer/format/ags-symbolizer";
const converter = new StyleConverter();
export class SimpleMarkerConverter implements Serializer.IConverter<any> {
toJson(style: ol.style.Style) {<|fim▁hole|> throw "not-implemented";
}
fromJson(json: any) {
return converter.fromJson(json);
}
}<|fim▁end|> | |
<|file_name|>SelectVisitor.java<|end_file_name|><|fim▁begin|>/**
* Copyright MaDgIK Group 2010 - 2015.
*/
package madgik.exareme.master.queryProcessor.decomposer.query.visitors;
import com.foundationdb.sql.StandardException;
import com.foundationdb.sql.parser.FromSubquery;
import com.foundationdb.sql.parser.SelectNode;<|fim▁hole|>
/**
* @author heraldkllapi
*/
public class SelectVisitor extends AbstractVisitor {
public SelectVisitor(SQLQuery query) {
super(query);
}
@Override
public Visitable visit(Visitable node) throws StandardException {
if (node instanceof SelectNode) {
if (((SelectNode) node).isDistinct()) {
query.setOutputColumnsDistinct(true);
}
// Result columns
ResultColumnsVisitor projectVisitor = new ResultColumnsVisitor(query);
node.accept(projectVisitor);
// Input tables
FromListVisitor fromVisitor = new FromListVisitor(query);
node.accept(fromVisitor);
// Where conditions
WhereClauseVisitor whereVisitor = new WhereClauseVisitor(query);
whereVisitor.setVisitedJoin(true);
node.accept(whereVisitor);
// Group by
GroupByListVisitor groupByVisitor = new GroupByListVisitor(query);
node.accept(groupByVisitor);
return node;
}
return node;
}
@Override
public boolean skipChildren(Visitable node) {
return FromSubquery.class.isInstance(node);
}
}<|fim▁end|> | import com.foundationdb.sql.parser.Visitable;
import madgik.exareme.master.queryProcessor.decomposer.query.SQLQuery; |
<|file_name|>fields.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.conf import settings
from django.forms.utils import ValidationError
from os import chmod
import hashlib
from io import BytesIO
try:
import pyclamd
except Exception:
pass
class yatsFileField(forms.FileField):
default_error_messages = {
'virus_found': _(u"file is infected by virus: %s"),
'virus_engine_error': _(u'unable to initialize scan engine on host %s')
}
def clean(self, data, initial=None):
f = super(yatsFileField, self).clean(initial or data)
if f is None:
return None
elif not data and initial:
return initial
if settings.FILE_UPLOAD_VIRUS_SCAN and pyclamd:
# virus scan
try:
if not hasattr(pyclamd, 'scan_stream'):
cd = pyclamd.ClamdUnixSocket()
else:
pyclamd.init_network_socket('localhost', 3310)
cd = pyclamd<|fim▁hole|> chmod(data.temporary_file_path(), 0o664)
result = cd.scan_file(data.temporary_file_path())
else:
if hasattr(data, 'read'):
result = cd.scan_stream(data.read())
else:
result = cd.scan_stream(data['content'])
except:
from socket import gethostname
raise ValidationError(self.error_messages['virus_engine_error'] % gethostname())
if result:
            msg = ' '.join(result[list(result.keys())[0]]).replace('FOUND ', '')
raise ValidationError(self.error_messages['virus_found'] % msg)
hasher = hashlib.md5()
# We need to get a file object for clamav. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
with open(data.temporary_file_path(), 'rb') as afile:
buf = afile.read()
hasher.update(buf)
else:
if hasattr(data, 'read'):
data.seek(0)
buf = data.read()
hasher.update(buf)
else:
hasher.update(data['content'].read())
f.hash = hasher.hexdigest()
return f<|fim▁end|> |
# We need to get a file object for clamav. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'): |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-<|fim▁hole|>
import invoice<|fim▁end|> | # © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
|
<|file_name|>app.js<|end_file_name|><|fim▁begin|>import React from 'react'
import {
Container,
Group,
TabBar,
Icon,
Badge,
amStyles,
} from 'amazeui-touch'
import {Link} from 'react-router'
class App extends React.Component{
  static propTypes = {
children:React.PropTypes.node
}
render(){
let {
location,
params,
children,
...props
} = this.props;
let transition = children.props.transition || 'sfr'
return (
<Container direction="column">
<Container
transition={transition}
>
{children}
</Container>
<TabBar
>
<TabBar.Item
component={Link}
eventKey = 'home'
active = {location.pathname === '/'}
icon = 'home'
title = '首页'
to='/'
/>
<TabBar.Item
component={Link}
active={location.pathname === '/class'}
eventKey="class"
icon="list"
title="课程"
to='/class'
/>
<TabBar.Item
active={location.pathname === '/search'}
eventKey="search"
icon="search"
title="发现"
/>
<TabBar.Item
component={Link}
active={location.pathname === '/me'}
eventKey="person"
icon="person"
title="我"
to='/me'
/><|fim▁hole|> </TabBar>
</Container>
)
}
}
export default App<|fim▁end|> | |
<|file_name|>logger.rs<|end_file_name|><|fim▁begin|>//! This module is a trimmed-down copy of rtx_core::util::logger,
//! which is still waiting to get released as a crate...
//! maybe there is a simple logger crate that achieves this exact behavior?
use ansi_term::Colour::{Green, Red, White, Yellow};
use ansi_term::Style;
use chrono::Local;
use log::max_level;
use log::{Level, LevelFilter, Metadata, Record, SetLoggerError};
<|fim▁hole|>/// Convenient printing to STDERR (with \n)
#[macro_export]
macro_rules! println_stderr(
($($arg:tt)*) => ({
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
})
);
/// Convenient printing to STDERR
#[macro_export]
macro_rules! print_stderr(
($($arg:tt)*) => ({
use std::io::Write;
match write!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
})
);
impl log::Log for RtxLogger {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= max_level()
}
fn log(&self, record: &Record) {
if self.enabled(record.metadata()) {
let record_target = record.target();
let details = record.args();
let category_object = if record_target.is_empty() {
"" // "unknown:unknown" ???
} else {
record_target
};
// Following the reporting syntax at: http://dlmf.nist.gov/LaTeXML/manual/errorcodes/
// let severity = if category_object.starts_with("Fatal:") {
// ""
// } else {
// match record.level() {
// Level::Info => "Info",
// Level::Warn => "Warn",
// Level::Error => "Error",
// Level::Debug => "Debug",
// Level::Trace => "Trace",
// }
// };
let message = format!("{}\t", category_object);
let painted_message = match record.level() {
Level::Info => Green.paint(message),
Level::Warn => Yellow.paint(message),
Level::Error => Red.paint(message),
Level::Debug => Style::default().paint(message),
_ => White.paint(message),
}
.to_string()
+ &details.to_string();
println_stderr!(
"\r[{}] {}",
Local::now().format("%Y-%m-%d %H:%M:%S"),
painted_message
);
}
}
fn flush(&self) {}
}
/// Initialize the logger with an appropriate level of verbosity
pub fn init(level: LevelFilter) -> Result<(), SetLoggerError> {
log::set_logger(&LOGGER).unwrap();
log::set_max_level(level);
Ok(())
}<|fim▁end|> | struct RtxLogger;
static LOGGER: RtxLogger = RtxLogger;
|
<|file_name|>horizontalpodautoscaler.go<|end_file_name|><|fim▁begin|>/*
Copyright 2021 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package horizontalpodautoscaler<|fim▁hole|> context "context"
apiautoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
v2beta1 "k8s.io/client-go/informers/autoscaling/v2beta1"
kubernetes "k8s.io/client-go/kubernetes"
autoscalingv2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1"
cache "k8s.io/client-go/tools/cache"
client "knative.dev/pkg/client/injection/kube/client"
factory "knative.dev/pkg/client/injection/kube/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
injection.Default.RegisterInformer(withInformer)
injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct{}
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := factory.Get(ctx)
inf := f.Autoscaling().V2beta1().HorizontalPodAutoscalers()
return context.WithValue(ctx, Key{}, inf), inf.Informer()
}
func withDynamicInformer(ctx context.Context) context.Context {
inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
return context.WithValue(ctx, Key{}, inf)
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context) v2beta1.HorizontalPodAutoscalerInformer {
untyped := ctx.Value(Key{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch k8s.io/client-go/informers/autoscaling/v2beta1.HorizontalPodAutoscalerInformer from context.")
}
return untyped.(v2beta1.HorizontalPodAutoscalerInformer)
}
type wrapper struct {
client kubernetes.Interface
namespace string
resourceVersion string
}
var _ v2beta1.HorizontalPodAutoscalerInformer = (*wrapper)(nil)
var _ autoscalingv2beta1.HorizontalPodAutoscalerLister = (*wrapper)(nil)
func (w *wrapper) Informer() cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(nil, &apiautoscalingv2beta1.HorizontalPodAutoscaler{}, 0, nil)
}
func (w *wrapper) Lister() autoscalingv2beta1.HorizontalPodAutoscalerLister {
return w
}
func (w *wrapper) HorizontalPodAutoscalers(namespace string) autoscalingv2beta1.HorizontalPodAutoscalerNamespaceLister {
return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
}
// SetResourceVersion allows consumers to adjust the minimum resourceVersion
// used by the underlying client. It is not accessible via the standard
// lister interface, but can be accessed through a user-defined interface and
// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
func (w *wrapper) SetResourceVersion(resourceVersion string) {
w.resourceVersion = resourceVersion
}
func (w *wrapper) List(selector labels.Selector) (ret []*apiautoscalingv2beta1.HorizontalPodAutoscaler, err error) {
lo, err := w.client.AutoscalingV2beta1().HorizontalPodAutoscalers(w.namespace).List(context.TODO(), v1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: w.resourceVersion,
})
if err != nil {
return nil, err
}
for idx := range lo.Items {
ret = append(ret, &lo.Items[idx])
}
return ret, nil
}
func (w *wrapper) Get(name string) (*apiautoscalingv2beta1.HorizontalPodAutoscaler, error) {
return w.client.AutoscalingV2beta1().HorizontalPodAutoscalers(w.namespace).Get(context.TODO(), name, v1.GetOptions{
ResourceVersion: w.resourceVersion,
})
}<|fim▁end|> |
import ( |
<|file_name|>view.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
:created: 2014-01-25
:author: Rinze de Laat
:copyright: © 2014 Rinze de Laat, Delmic
This file is part of Odemis.
.. license::
Odemis is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License version 2 as published by the Free
Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
import cairo
import itertools
import logging
import math
import numpy
from odemis import util
from odemis.gui.comp.overlay.base import Label
from odemis.util import peak
import wx
import odemis.gui as gui
import odemis.gui.comp.overlay.base as base
import odemis.model as model
import odemis.util.conversion as conversion
import odemis.util.units as units
class TextViewOverlay(base.ViewOverlay):
""" Render the present labels to the screen """
def __init__(self, cnvs):
base.ViewOverlay.__init__(self, cnvs)
def draw(self, ctx):
if self.labels:
self._write_labels(ctx)
class CrossHairOverlay(base.ViewOverlay):
""" Render a static cross hair to the center of the view """
def __init__(self, cnvs, colour=gui.CROSSHAIR_COLOR, size=gui.CROSSHAIR_SIZE):
base.ViewOverlay.__init__(self, cnvs)
self.colour = conversion.hex_to_frgba(colour)
self.size = size
def draw(self, ctx):
""" Draw a cross hair to the Cairo context """
center = self.cnvs.get_half_view_size()
tl = (center[0] - self.size, center[1] - self.size)
br = (center[0] + self.size, center[1] + self.size)
ctx.set_line_width(1)
# Draw shadow
ctx.set_source_rgba(0, 0, 0, 0.9)
ctx.move_to(tl[0] + 1.5, center[1] + 1.5)
ctx.line_to(br[0] + 1.5, center[1] + 1.5)
ctx.move_to(center[0] + 1.5, tl[1] + 1.5)
ctx.line_to(center[0] + 1.5, br[1] + 1.5)
ctx.stroke()
# Draw cross hair
ctx.set_source_rgba(*self.colour)
ctx.move_to(tl[0] + 0.5, center[1] + 0.5)
ctx.line_to(br[0] + 0.5, center[1] + 0.5)
ctx.move_to(center[0] + 0.5, tl[1] + 0.5)
ctx.line_to(center[0] + 0.5, br[1] + 0.5)
ctx.stroke()
class PlayIconOverlay(base.ViewOverlay):
""" Render Stream (play/pause) icons to the view """
opacity = 0.8
def __init__(self, cnvs):
base.ViewOverlay.__init__(self, cnvs)
self.pause = False # if True: displayed
self.play = 0 # opacity of the play icon
self.colour = conversion.hex_to_frgba(gui.FG_COLOUR_HIGHLIGHT, self.opacity)
def hide_pause(self, hidden=True):
""" Hide or show the pause icon """
self.pause = not hidden
if not self.pause:
self.play = 1.0
wx.CallAfter(self.cnvs.Refresh)
def draw(self, ctx):
if self.show:
if self.pause:
self._draw_pause(ctx)
elif self.play:
self._draw_play(ctx)
if self.play > 0:
self.play -= 0.1 # a tenth less
# Force a refresh (without erase background), to cause a new draw
wx.CallLater(50, self.cnvs.Refresh, False) # in 0.05 s
else:
self.play = 0
def _get_dimensions(self):
width = max(16, self.view_width / 10)
height = width
right = self.view_width
bottom = self.view_height
margin = self.view_width / 25
return width, height, right, bottom, margin
def _draw_play(self, ctx):
width, height, right, _, margin = self._get_dimensions()
half_height = height / 2
x = right - margin - width + 0.5
y = margin + 0.5
ctx.set_line_width(1)
ctx.set_source_rgba(
*conversion.hex_to_frgba(
gui.FG_COLOUR_HIGHLIGHT, self.play))
ctx.move_to(x, y)
x = right - margin - 0.5
y += half_height
ctx.line_to(x, y)
x = right - margin - width + 0.5
y += half_height
ctx.line_to(x, y)
ctx.close_path()
ctx.fill_preserve()
ctx.set_source_rgba(0, 0, 0, self.play)
ctx.stroke()
def _draw_pause(self, ctx):
width, height, right, _, margin = self._get_dimensions()
bar_width = max(width / 3, 1)
gap_width = max(width - (2 * bar_width), 1) - 0.5
x = right - margin - bar_width + 0.5
y = margin + 0.5
ctx.set_line_width(1)
ctx.set_source_rgba(*self.colour)
ctx.rectangle(x, y, bar_width, height)
x -= bar_width + gap_width
ctx.rectangle(x, y, bar_width, height)
ctx.set_source_rgba(*self.colour)
ctx.fill_preserve()
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
class FocusOverlay(base.ViewOverlay):
""" Display the focus modification indicator """
def __init__(self, cnvs):
base.ViewOverlay.__init__(self, cnvs)
self.margin = 10
self.line_width = 16
self.shifts = [None, None] # None or float (m)
self.ppm = (5e6, 5e6) # px/m, conversion ratio m -> px
self.focus_label = self.add_label("", align=wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
def draw(self, ctx):
# TODO: Both focuses at the same time, or 'snap' to horizontal/vertical on first motion?
ctx.set_line_width(10)
ctx.set_line_join(cairo.LINE_JOIN_MITER)
ctx.set_source_rgba(1.0, 1.0, 1.0, 0.8)
x, y = self.cnvs.ClientSize
# Horizontal
if self.shifts[0] is not None:
y -= self.margin + (self.line_width // 2)
middle = x / 2
# don't display extremely small values, which are due to accumulation
# of floating point error
shiftm = self.shifts[0]
if abs(shiftm) < 1e-12:
shiftm = 0
shift = shiftm * self.ppm[0]
end_x = middle + (middle * (shift / (x / 2)))
end_x = min(max(self.margin, end_x), x - self.margin)
ctx.move_to(middle, y)
ctx.line_to(end_x, y)
ctx.stroke()
lbl = "focus %s" % units.readable_str(shiftm, 'm', 2)
self.focus_label.text = lbl
self.focus_label.pos = (end_x, y - 15)
self._write_label(ctx, self.focus_label)
# Vertical
if self.shifts[1] is not None:
x -= self.margin + (self.line_width // 2)
middle = y / 2
# don't display extremely small values, which are due to accumulation
# of floating point error
shiftm = self.shifts[1]
if abs(shiftm) < 1e-12:
shiftm = 0
shift = shiftm * self.ppm[1]
end_y = middle - (middle * (shift / (y / 2)))
end_y = min(max(self.margin, end_y), y - self.margin)
ctx.move_to(x, middle)
ctx.line_to(x, end_y)
ctx.stroke()
lbl = "focus %s" % units.readable_str(shiftm, 'm', 2)
self.focus_label.text = lbl
self.focus_label.pos = (x - 15, end_y)
self._write_label(ctx, self.focus_label)
def add_shift(self, shift, axis):
""" Adds a value on the given axis and updates the overlay
shift (float): amount added to the current value (can be negative)
axis (int): axis for which this happens
"""
if self.shifts[axis] is None:
self.shifts[axis] = shift
else:
self.shifts[axis] += shift
self.cnvs.Refresh()
def clear_shift(self):
logging.debug("Clearing focus shift")
self.shifts = [None, None]
self.cnvs.Refresh()
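# Worked example (editor addition, illustrative only): with FocusOverlay's default
# ppm of (5e6, 5e6) px/m, a call such as overlay.add_shift(2e-6, 0) -- where
# `overlay` is an assumed FocusOverlay instance -- makes draw() end the horizontal
# focus bar 2e-6 * 5e6 = 10 px to the right of the view centre, labelled roughly
# "focus 2 µm".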
class ViewSelectOverlay(base.ViewOverlay, base.SelectionMixin):
def __init__(self, cnvs, colour=gui.SELECTION_COLOUR, center=(0, 0)):
base.ViewOverlay.__init__(self, cnvs)
base.SelectionMixin.__init__(self, colour, center, base.EDIT_MODE_BOX)
self.position_label = self.add_label("")
def draw(self, ctx, shift=(0, 0), scale=1.0):
if self.select_v_start_pos and self.select_v_end_pos:
start_pos = self.select_v_start_pos
end_pos = self.select_v_end_pos
# logging.debug("Drawing from %s, %s to %s. %s", start_pos[0],
# start_pos[1],
# end_pos[0],
# end_pos[1] )
rect = (start_pos[0] + 0.5,
start_pos[1] + 0.5,
end_pos[0] - start_pos[0],
end_pos[1] - start_pos[1])
# draws a light black background for the rectangle
ctx.set_line_width(2)
ctx.set_source_rgba(0, 0, 0, 0.5)
ctx.rectangle(*rect)
ctx.stroke()
# draws the dotted line
ctx.set_line_width(1.5)
ctx.set_dash([2])
ctx.set_line_join(cairo.LINE_JOIN_MITER)
ctx.set_source_rgba(*self.colour)
ctx.rectangle(*rect)
ctx.stroke()
self._debug_draw_edges(ctx)
self.position_label.pos = start_pos
def on_left_down(self, evt):
""" Start drag action if enabled, otherwise call super method so event will propagate """
if self.active:
base.SelectionMixin._on_left_down(self, evt)
base.ViewOverlay.on_left_down(self, evt)
def on_left_up(self, evt):
""" End drag action if enabled, otherwise call super method so event will propagate """
if self.active:
base.SelectionMixin._on_left_up(self, evt)
base.ViewOverlay.on_left_up(self, evt)
def on_motion(self, evt):
""" Process drag motion if enabled, otherwise call super method so event will propagate """
if self.active:
base.SelectionMixin._on_motion(self, evt)
base.ViewOverlay.on_motion(self, evt)
class MarkingLineOverlay(base.ViewOverlay, base.DragMixin):
""" Draw a vertical line at the given view position
Provides a .val VA indicating the selected position by the user (using mouse).
"""
HORIZONTAL = 1
VERTICAL = 2
def __init__(self, cnvs, colour=gui.SELECTION_COLOUR, orientation=None, map_y_from_x=False):
"""
map_y_from_x (bool): If True, the Y coordinate of the value will be
based on the data, obtained via cnvs.val_x_to_val(), and .val will
contain None as Y => 1D movement.
If False, both X and Y will be based on the mouse position (2D movement).
"""
base.ViewOverlay.__init__(self, cnvs)
base.DragMixin.__init__(self)
self.label = None
self.colour = conversion.hex_to_frgba(colour)
self.map_y_from_x = map_y_from_x
# highlighted position (in the data format, but not necessarily part of the data)
self.val = model.VigilantAttribute(None) # tuple (X, Y) or None
self._x_label = self.add_label("", colour=self.colour)
self._y_label = self.add_label("", colour=self.colour, align=wx.ALIGN_BOTTOM)
self.orientation = orientation or self.HORIZONTAL
self.label_orientation = self.orientation
self.line_width = 2
@property
def x_label(self):
return self._x_label
@x_label.setter
def x_label(self, lbl):
if self.label_orientation & self.VERTICAL:
self._x_label.text = lbl
@property
def y_label(self):
return self._y_label
@y_label.setter
def y_label(self, lbl):
self._y_label.text = lbl
def clear_labels(self):
self.val.value = None
def hide_x_label(self):
self.label_orientation = self.HORIZONTAL
# Event Handlers
def on_left_down(self, evt):
if self.active:
base.DragMixin._on_left_down(self, evt)
self.colour = self.colour[:3] + (0.5,)
self._store_event_pos(evt)
self.cnvs.Refresh()
base.ViewOverlay.on_left_down(self, evt)
def on_left_up(self, evt):
if self.active:
base.DragMixin._on_left_up(self, evt)
self.colour = self.colour[:3] + (1.0,)
self._store_event_pos(evt)
self.cnvs.Refresh()
base.ViewOverlay.on_left_up(self, evt)
def on_motion(self, evt):
if self.active and self.left_dragging:
self._store_event_pos(evt)
self.cnvs.Refresh()
base.ViewOverlay.on_motion(self, evt)
# END Event Handlers
def _store_event_pos(self, evt):
""" Position the focus line at the position of the given mouse event """
x, y = evt.GetPositionTuple()
x = max(1, min(self.view_width, x))
if self.map_y_from_x:
# Y will be automatically mapped at drawing
val = self.cnvs.pos_x_to_val_x(x, snap=False), None
else:
y = max(1, min(self.view_height, y))
val = self.cnvs.pos_to_val((x, y), snap=False)
self.val.value = val
def draw(self, ctx):
ctx.set_line_width(self.line_width)
ctx.set_dash([3])
ctx.set_line_join(cairo.LINE_JOIN_MITER)
ctx.set_source_rgba(*self.colour)
if self.val.value is not None:
val = self.val.value
if self.map_y_from_x:
# Maps Y and also snap X to the closest X value in the data
val = self.cnvs.val_x_to_val(val[0])
v_pos = self.cnvs.val_to_pos(val)
self.x_label = units.readable_str(val[0], self.cnvs.unit_x, 3)
self.y_label = units.readable_str(val[1], self.cnvs.unit_y, 3)
# v_posx, v_posy = self.v_pos.value
if self.orientation & self.VERTICAL:
ctx.move_to(v_pos[0], 0)
ctx.line_to(v_pos[0], self.cnvs.ClientSize.y)
ctx.stroke()
if self.orientation & self.HORIZONTAL:
ctx.move_to(0, v_pos[1])
ctx.line_to(self.cnvs.ClientSize.x, v_pos[1])
ctx.stroke()
if self.x_label.text:
self.x_label.pos = (v_pos[0] + 5, self.cnvs.ClientSize.y)
self._write_label(ctx, self.x_label)
if self.y_label.text:
yp = max(0, v_pos[1] - 5) # Padding from line
# Increase bottom margin if x label is close
label_padding = 30 if v_pos[0] < 50 else 0
yn = min(self.view_height - label_padding, yp)
self.y_label.pos = (2, yn)
self._write_label(ctx, self.y_label)
r, g, b, a = conversion.change_brightness(self.colour, -0.2)
ctx.set_source_rgba(r, g, b, 0.5)
ctx.arc(v_pos[0], v_pos[1], 5.5, 0, 2 * math.pi)
ctx.fill()
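# Illustrative usage sketch (editor addition, not part of the original file): how a
# plot viewport might attach a MarkingLineOverlay and follow .val. The `canvas`
# argument, its add_view_overlay() method and setting .active directly are
# assumptions; the rest comes from the class above.
def _attach_marker_overlay(canvas):
    marker = MarkingLineOverlay(canvas,
                                orientation=MarkingLineOverlay.HORIZONTAL | MarkingLineOverlay.VERTICAL,
                                map_y_from_x=True)
    # With map_y_from_x=True, .val carries (x, None): Y is looked up from the data
    marker.val.subscribe(lambda v: logging.debug("marker moved to %s", v))
    canvas.add_view_overlay(marker)
    marker.active = True
    return marker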
class CurveOverlay(base.ViewOverlay, base.DragMixin):
""" Draw a curve at the given view position
"""
def __init__(self, cnvs, colour=gui.FG_COLOUR_CURVE, colour_peaks=gui.FG_COLOUR_PEAK, length=256):
base.ViewOverlay.__init__(self, cnvs)
base.DragMixin.__init__(self)
self.length = length # curve length
self.label = None
self.colour = conversion.hex_to_frgba(colour, 0.5)
self.colour_peaks = conversion.hex_to_frgba(colour_peaks)
# The current highlighted position
self.selected_wl = None # in same unit as the range
self.peaks = None # list of peak data
self.peak_offset = None
self.range = None # array of wl/px
self.unit = None # str
self.type = None # str
# Cached computation of the peak curve. The global curve is index None
self._curves = {} # wavelength/None -> list of values
self.list_labels = []
self.width_labels = []
self.amplitude_labels = []
self.peak_labels = []
self.line_width = 2
# Event Handlers
def on_left_down(self, evt):
if self.active:
base.DragMixin._on_left_down(self, evt)
self._store_event_pos(evt)
self.cnvs.Refresh()
base.ViewOverlay.on_left_down(self, evt)
def on_left_up(self, evt):
if self.active:
base.DragMixin._on_left_up(self, evt)
self._store_event_pos(evt)
self.cnvs.Refresh()
base.ViewOverlay.on_left_up(self, evt)
def on_motion(self, evt):
if self.active and self.left_dragging:
self._store_event_pos(evt)
self.cnvs.Refresh()
base.ViewOverlay.on_motion(self, evt)
# END Event Handlers
def clear_labels(self):
self.peaks = None
def _store_event_pos(self, evt):
""" Position the focus line at the position of the given mouse event """
x, y = evt.GetPositionTuple()
if self.peaks is not None:
# Store in the same format as the data, so it still works after resize
x = max(min(self.view_width, x), 1)
width = self.range[-1] - self.range[0]
self.selected_wl = self.range[0] + x / self.view_width * width
else:
self.selected_wl = None
def update_data(self, peak_data, peak_offset, spectrum_range, unit, type):
"""
peak_data (list of tuple of 3 floats): series of (pos, width, amplitude)
peak_offset (float): initial offset
spectrum_range (list of floats): wavelength/pixel for each pixel in the original spectrum data
unit (str): m or px
type (str): peak fitting method, 'gaussian' or 'lorentzian'
"""
self.peaks = peak_data
self.peak_offset = peak_offset
self.range = spectrum_range
self.unit = unit
self.type = type
self._curves = {} # throw away the cache
self.cnvs.Refresh()
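    # Illustrative call (editor addition, not from the original file): two peaks
    # given as (pos, width, amplitude) tuples over a 400-700 nm range in metres;
    # `overlay` is an assumed CurveOverlay instance attached to a plot canvas.
    #   overlay.update_data(peak_data=[(500e-9, 10e-9, 1500.0), (600e-9, 8e-9, 900.0)],
    #                       peak_offset=100.0,
    #                       spectrum_range=list(numpy.linspace(400e-9, 700e-9, 256)),
    #                       unit='m', type='gaussian')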
def draw(self, ctx):
peaks = self.peaks
rng = self.range
if (peaks is None) or (self.type is None):
return
# If original range is too small, create a finer one
if len(rng) < self.length * 0.9:
rng = numpy.linspace(rng[0], rng[-1], self.length)
# Compute the label and global curve on the first time needed
if None not in self._curves:
self.width_labels = []
self.amplitude_labels = []
self.peak_labels = []
for pos, width, amplitude in peaks:
self.peak_labels.append(units.readable_str(pos, self.unit, 3))
self.width_labels.append(units.readable_str(width, self.unit, 3))
self.amplitude_labels.append(units.readable_str(amplitude, None, 3))
self._curves[None] = peak.Curve(rng, peaks, self.peak_offset, type=self.type)
curve = self._curves[None]
step = max(1, len(rng) // self.length)
rng_first = rng[0]
rng_last = rng[-1]
rng_n = rng[1::step]
mn, mx = min(curve), max(curve)
if mn == mx:
logging.info("Global peak curve is flat, not displaying")
return
client_size_x = self.cnvs.ClientSize.x
client_size_y = self.cnvs.ClientSize.y
ctx.set_line_width(self.line_width)
ctx.set_dash([3])
ctx.set_line_join(cairo.LINE_JOIN_MITER)
ctx.set_source_rgba(*self.colour)
curve_drawn = []
curve_n = curve[1::step]
for x, y in itertools.izip(rng_n, curve_n):
x_canvas = (((x - rng_first) * (client_size_x - 1)) / (rng_last - rng_first)) + 1
y_canvas = (((y - mn) * (client_size_y - 1)) / (mx - mn)) + 1
y_canvas = client_size_y - y_canvas
ctx.line_to(x_canvas, y_canvas)
curve_drawn.append((x_canvas, y_canvas))
ctx.stroke()
# Draw the peak and peak label
peaks_canvpos = []
# Depends on canvas size so always update
for pos, width, amplitude in peaks:
peaks_canvpos.append(int((((pos - rng_first) * (client_size_x - 1)) / (rng_last - rng_first)) + 1))
ctx.set_source_rgba(*self.colour_peaks)
self.list_labels = []
for p_label, p_pos in zip(self.peak_labels, peaks_canvpos):
ctx.move_to(p_pos - 3, client_size_y)
ctx.line_to(p_pos, client_size_y - 16)
ctx.line_to(p_pos + 3, client_size_y)
ctx.line_to(p_pos - 3, client_size_y)
ctx.fill()
peak_tuple = min(curve_drawn, key=lambda p:abs(p[0] - p_pos))
peak_label = Label(
text=p_label,
pos=(p_pos, peak_tuple[1] - 20),
font_size=12,
flip=True,
align=wx.ALIGN_LEFT | wx.ALIGN_TOP,
colour=self.colour_peaks, # default to white
opacity=1.0,
deg=None
)
self.labels.append(peak_label)
self.list_labels.append(peak_label)
# Draw the peak curve (if the user has selected a wavelength)
if self.selected_wl is not None and peaks:
# Find closest peak
peak_i = util.index_closest(self.selected_wl, [p for (p, w, a) in peaks]) # peak pos
peak_pos = peaks[peak_i][0]
peak_margin = (rng_last - rng_first) / (5 * len(peaks))
if abs(peak_pos - self.selected_wl) <= peak_margin:
if peak_i not in self._curves:
self._curves[peak_i] = peak.Curve(rng, [peaks[peak_i]], self.peak_offset, type=self.type)
single_curve = self._curves[peak_i]
ctx.set_source_rgba(*self.colour)
x_canvas = 1
y_canvas = client_size_y - 1
ctx.move_to(x_canvas, y_canvas)
curve_n = single_curve[1::step]
for x, y in itertools.izip(rng_n, curve_n):
x_canvas = (((x - rng_first) * (client_size_x - 1)) / (rng_last - rng_first)) + 1
y_canvas = (((y - mn) * (client_size_y - 1)) / (mx - mn)) + 1
y_canvas = client_size_y - y_canvas
ctx.line_to(x_canvas, y_canvas)
x_canvas = client_size_x
y_canvas = client_size_y - 1
ctx.line_to(x_canvas, y_canvas)
ctx.fill()
# Add more info to that specific peak label
self.list_labels[peak_i].text += "\nWidth: " + self.width_labels[peak_i] + "\nAmplitude: " + self.amplitude_labels[peak_i]
for pl in self.list_labels:
self._write_label(ctx, pl)
class DichotomyOverlay(base.ViewOverlay):
""" This overlay allows the user to select a sequence of nested quadrants
within the canvas. The quadrants are numbered 0 to 3, from the top left to
the bottom right. The first quadrant is the biggest, with each subsequent
quadrant being nested in the one before it.
"""
TOP_LEFT = 0
TOP_RIGHT = 1
BOTTOM_LEFT = 2
BOTTOM_RIGHT = 3
def __init__(self, cnvs, sequence_va, colour=gui.SELECTION_COLOUR):
""" :param sequence_va: (ListVA) VA to store the sequence in
"""
base.ViewOverlay.__init__(self, cnvs)
self.colour = conversion.hex_to_frgba(colour)
# Color for quadrant that will expand the sequence
self.hover_forw = conversion.hex_to_frgba(colour, 0.5)
# Color for quadrant that will cut the sequence
self.hover_back = conversion.change_brightness(self.hover_forw, -0.2)
self.sequence_va = sequence_va
self.sequence_rect = []
# This attribute is used to track the position of the mouse cursor.
# The first value denotes the smallest quadrant (in size) in the
# sequence and the second one the quadrant index number that will
# be added if the mouse is clicked.
# This value should be set to (None, None) if the mouse is outside the
# canvas or when we are not interested in updating the sequence.
self.hover_pos = (None, None)
# maximum number of sub-quadrants (6->2**6 smaller than the whole area)
self.max_len = 6
self.sequence_va.subscribe(self.on_sequence_change, init=True)
# Disabling the overlay will allow the event handlers to ignore events
self.active = False
def on_sequence_change(self, seq):
if not all([0 <= v <= 3 for v in seq]):
raise ValueError("Illegal quadrant values in sequence!")
rect = 0, 0, self.view_width, self.view_height
self.sequence_rect = [rect]
for i, q in enumerate(seq):
rect = self.index_to_rect(i, q)
self.sequence_rect.append(rect)
self.cnvs.Refresh()
def _reset(self):
""" Reset all attributes to their default values and get the dimensions
from the cnvs canvas.
"""
logging.debug("Reset")
self.sequence_va.value = []
# Event Handlers
def on_leave(self, evt):
""" Event handler called when the mouse cursor leaves the canvas """
if self.active:
# When the mouse cursor leaves the overlay, the current top quadrant
# should be highlighted, so clear the hover_pos attribute.
self.hover_pos = (None, None)
self.cnvs.Refresh()
else:
base.ViewOverlay.on_leave(self, evt)
def on_motion(self, evt):
""" Mouse motion event handler """
if self.active:
self._update_hover(evt.GetPosition())
else:
base.ViewOverlay.on_motion(self, evt)
def on_left_down(self, evt):
""" Prevent the left mouse button event from propagating when the overlay is active"""
if not self.active:
base.ViewOverlay.on_left_down(self, evt)
def on_dbl_click(self, evt):
""" Prevent the double click event from propagating if the overlay is active"""
if not self.active:
base.ViewOverlay.on_dbl_click(self, evt)
def on_left_up(self, evt):
""" Mouse button handler """
if self.active:
# If the mouse cursor is over a selectable quadrant
if None not in self.hover_pos:
idx, quad = self.hover_pos
# If we are hovering over the 'top' quadrant, add it to the sequence
if len(self.sequence_va.value) == idx:
new_seq = self.sequence_va.value + [quad]
new_seq = new_seq[:self.max_len] # cut if too long
# Jump to the desired quadrant otherwise, cutting the sequence
else:
# logging.debug("Trim")
new_seq = self.sequence_va.value[:idx] + [quad]
self.sequence_va.value = new_seq
self._update_hover(evt.GetPosition())
else:
base.ViewOverlay.on_left_up(self, evt)
def on_size(self, evt):
""" Called when size of canvas changes
"""
# Force the re-computation of rectangles
self.on_sequence_change(self.sequence_va.value)
base.ViewOverlay.on_size(self, evt)
# END Event Handlers
def _update_hover(self, pos):
idx, quad = self.quad_hover(pos)
# Change the cursor into a hand if the quadrant being hovered over
# can be selected. Use the default cursor otherwise
if idx >= self.max_len:
self.cnvs.reset_dynamic_cursor()
idx, quad = (None, None)
else:
self.cnvs.set_dynamic_cursor(wx.CURSOR_HAND)
# Redraw only if the quadrant changed
if self.hover_pos != (idx, quad):
self.hover_pos = (idx, quad)
self.cnvs.Refresh()
def quad_hover(self, vpos):
""" Return the sequence index number of the rectangle at position vpos
and the quadrant vpos is over inside that rectangle.
:param vpos: (int, int) The viewport x,y hover position
"""
# Loop over the rectangles, smallest one first
for i, (x, y, w, h) in reversed(list(enumerate(self.sequence_rect))):
if x <= vpos.x <= x + w:
if y <= vpos.y <= y + h:
# If vpos is within the rectangle, we can determine the
# quadrant.
# Remember that the quadrants are numbered as follows:
#
# 0 | 1
# --+--
# 2 | 3
# Construct the quadrant number by starting with 0
quad = 0
# If the position is in the right half, add 1 to the quadrant
if vpos.x > x + w / 2:
quad += 1
# If the position is in the bottom half, add 2
if vpos.y > y + h / 2:
quad += 2
return i, quad
return None, None
def index_to_rect(self, idx, quad):
""" Translate given rectangle and quadrant into a view rectangle
:param idx: (int) The index number of the rectangle in sequence_rect
that we are going to use as a base.
:param quad: (int) The quadrant number
:return: (int, int, int, int) Rectangle tuple of the form x, y, w, h
"""
x, y, w, h = self.sequence_rect[idx]
# The new rectangle will have half the size of the cnvs one
w /= 2
h /= 2
# If the quadrant is in the right half, construct x by adding half the
# width to x position of the cnvs rectangle.
if quad in (self.TOP_RIGHT, self.BOTTOM_RIGHT):
x += w
# If the quadrant is in the bottom half, construct y by adding half the
# height to the y position of the cnvs rectangle.
if quad in (self.BOTTOM_LEFT, self.BOTTOM_RIGHT):
y += h
return x, y, w, h
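    # Worked example (editor addition, illustrative only): with
    # sequence_rect[0] == (0, 0, 400, 300), index_to_rect(0, self.BOTTOM_RIGHT)
    # halves the size and shifts right and down, giving (200, 150, 200, 150);
    # index_to_rect(1, self.TOP_LEFT) on that rectangle then gives (200, 150, 100, 75).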
def draw(self, ctx):
ctx.set_source_rgba(*self.colour)
ctx.set_line_width(2)
ctx.set_dash([2])
ctx.set_line_join(cairo.LINE_JOIN_MITER)
# Draw previous selections as dashed rectangles
for rect in self.sequence_rect:
# logging.debug("Drawing ", *args, **kwargs)
ctx.rectangle(*rect)
ctx.stroke()
# If the mouse is over the canvas
if None not in self.hover_pos:
idx, quad = self.hover_pos
# If the mouse is over the smallest selected quadrant
if idx == len(self.sequence_va.value):
# Mark quadrant to be added
ctx.set_source_rgba(*self.hover_forw)
rect = self.index_to_rect(idx, quad)
ctx.rectangle(*rect)
ctx.fill()
else:
# Mark higher quadrant to 'jump' to
ctx.set_source_rgba(*self.hover_back)
rect = self.index_to_rect(idx, quad)
ctx.rectangle(*rect)
ctx.fill()
# Mark current quadrant
ctx.set_source_rgba(*self.hover_forw)
ctx.rectangle(*self.sequence_rect[-1])
ctx.fill()
# If the mouse is not over the canvas
elif self.sequence_va.value and self.sequence_rect:
# Mark the currently selected quadrant
ctx.set_source_rgba(*self.hover_forw)
ctx.rectangle(*self.sequence_rect[-1])
ctx.fill()
class PolarOverlay(base.ViewOverlay):
def __init__(self, cnvs):
base.ViewOverlay.__init__(self, cnvs)
self.canvas_padding = 0
# Rendering attributes
self.center_x = None
self.center_y = None
self.radius = None
self.inner_radius = None
self.tau = 2 * math.pi
self.num_ticks = 6
self.ticks = []
self.ticksize = 10
# Value attributes
self.px, self.py = None, None
self.tx, self.ty = None, None
self.colour = conversion.hex_to_frgb(gui.SELECTION_COLOUR)
self.colour_drag = conversion.hex_to_frgba(gui.SELECTION_COLOUR, 0.5)
self.colour_highlight = conversion.hex_to_frgb(gui.FG_COLOUR_HIGHLIGHT)
self.intensity_label = self.add_label("", align=wx.ALIGN_CENTER_HORIZONTAL,
colour=self.colour_highlight)
self.phi = None # Phi angle in radians
self.phi_line_rad = None # Phi drawing angle in radians (is phi -90)
self.phi_line_pos = None # End point in pixels of the Phi line
self.phi_label = self.add_label("", colour=self.colour,
align=wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_BOTTOM)
self.theta = None # Theta angle in radians
self.theta_radius = None # Radius of the theta circle in pixels
self.theta_label = self.add_label("", colour=self.colour,
align=wx.ALIGN_CENTER_HORIZONTAL)
self.intersection = None # The intersection of the circle and line in pixels
self.dragging = False
# Calculate the characteristic values for the first time
self.on_size()
# Property Getters/Setters
@property
def phi_rad(self):
return self.phi
@phi_rad.setter
def phi_rad(self, phi_rad):
self.phi = phi_rad
self._calculate_phi()
self.cnvs.Refresh()
@property
def phi_deg(self):
return math.degrees(self.phi)
@phi_deg.setter
def phi_deg(self, phi_deg):
self.phi_rad = math.radians(phi_deg)
@property
def theta_rad(self):
return self.theta
@theta_rad.setter
def theta_rad(self, theta_rad):
self.theta = theta_rad
self.theta_radius = (theta_rad / (math.pi / 2)) * self.inner_radius
self._calculate_theta()
self.cnvs.Refresh()
@property
def theta_deg(self):
return math.degrees(self.theta)
@theta_deg.setter
def theta_deg(self, theta_deg):
self.theta_rad = math.radians(theta_deg)
# END Property Getters/Setters
def _calculate_phi(self, view_pos=None):
""" Calcualate the Phi angle and the values to display the Phi line """
if view_pos:
vx, vy = view_pos
dx, dy = vx - self.center_x, self.center_y - vy
# Calculate the phi angle in radians
# Atan2 gives the angle between the positive x axis and the point
# dx,dy
self.phi = math.atan2(dx, dy) % self.tau
if self.phi:
self.phi_line_rad = self.phi - math.pi / 2
cos_phi_line = math.cos(self.phi_line_rad)
sin_phi_line = math.sin(self.phi_line_rad)
# Pixel to which to draw the Phi line to
phi_x = self.center_x + self.radius * cos_phi_line
phi_y = self.center_y + self.radius * sin_phi_line
self.phi_line_pos = (phi_x, phi_y)
# Calc Phi label pos
# Calculate the view point on the line where to place the label
if self.theta_radius > self.inner_radius / 2:
radius = self.inner_radius * 0.25
else:
radius = self.inner_radius * 0.75
x = self.center_x + radius * cos_phi_line
y = self.center_y + radius * sin_phi_line
self.phi_label.text = u"φ %0.1f°" % math.degrees(self.phi)
self.phi_label.deg = math.degrees(self.phi_line_rad)
# Now we calculate a perpendicular offset to the Phi line where
# we can plot the label. It is also determined if the label should
# flip, depending on the angle.
if self.phi < math.pi:
ang = -math.pi / 2.0 # -90 deg
self.phi_label.flip = False
else:
ang = math.pi / 2.0 # 90 deg
self.phi_label.flip = True
# Calculate a point further down the line that we will rotate
# around the calculated label x,y. By translating (-x and -y) we
# 'move' the origin to label x,y
rx = (self.center_x - x) + (radius + 5) * cos_phi_line
ry = (self.center_y - y) + (radius + 5) * sin_phi_line
# Apply the rotation
lx = rx * math.cos(ang) - ry * math.sin(ang)
ly = rx * math.sin(ang) + ry * math.cos(ang)
# Translate back to our original origin
lx += x
ly += y
self.phi_label.pos = (lx, ly)
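    # Worked example (editor addition): atan2(dx, dy) measures the angle from the
    # upward vertical, so a cursor straight to the right of the centre
    # (dx > 0, dy == 0) yields phi = pi / 2, i.e. phi_deg == 90.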
def _calculate_theta(self, view_pos=None):
""" Calculate the Theta angle and the values needed to display it. """
if view_pos:
vx, vy = view_pos
dx, dy = vx - self.center_x, self.center_y - vy
# Get the radius and the angle for Theta
self.theta_radius = min(math.sqrt(dx * dx + dy * dy),
self.inner_radius)
self.theta = (math.pi / 2) * (self.theta_radius / self.inner_radius)
elif self.theta:
self.theta_radius = (self.theta / (math.pi / 2)) * self.inner_radius
else:
return
# Calc Theta label pos
x = self.center_x
y = self.center_y + self.theta_radius + 3
theta_str = u"θ %0.1f°" % math.degrees(self.theta)
self.theta_label.text = theta_str
self.theta_label.pos = (x, y)
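    # Worked example (editor addition): with inner_radius = 200 px, theta_deg = 45
    # maps to theta_radius = (pi / 4) / (pi / 2) * 200 = 100 px, halfway to the rim.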
def _calculate_intersection(self):
if None not in (self.phi_line_rad, self.theta_radius):
# Calculate the intersection between Phi and Theta
x = self.center_x + self.theta_radius * math.cos(self.phi_line_rad)
y = self.center_y + self.theta_radius * math.sin(self.phi_line_rad)
self.intersection = (x, y)
else:
self.intersection = None
def _calculate_display(self, view_pos=None):
""" Calculate the values needed for plotting the Phi and Theta lines and labels
If view_pos is not given, the current Phi and Theta angles will be used.
"""
self._calculate_phi(view_pos)
self._calculate_theta(view_pos)
self._calculate_intersection()
# if (view_pos and 0 < self.intersection[0] < self.cnvs.ClientSize.x and
# 0 < self.intersection[1] < self.cnvs.ClientSize.y):
# # FIXME: Determine actual value here
# #self.intensity_label.text = ""
# pass
# Event Handlers
def on_left_down(self, evt):
if self.active:
self.dragging = True
base.ViewOverlay.on_left_down(self, evt)
def on_left_up(self, evt):
if self.active:
self._calculate_display(evt.GetPositionTuple())
self.dragging = False
self.cnvs.Refresh()
base.ViewOverlay.on_left_up(self, evt)
def on_motion(self, evt):
# Only change the values when the user is dragging
if self.active and self.dragging:
self._calculate_display(evt.GetPositionTuple())
self.cnvs.Refresh()
else:
base.ViewOverlay.on_motion(self, evt)
def on_enter(self, evt):
if self.active:
self.cnvs.set_default_cursor(wx.CROSS_CURSOR)
else:
base.ViewOverlay.on_enter(self, evt)
def on_leave(self, evt):
if self.active:
self.cnvs.reset_default_cursor()
else:
base.ViewOverlay.on_leave(self, evt)
def on_size(self, evt=None):
# Calculate the characteristic values
self.center_x = self.cnvs.ClientSize.x / 2
self.center_y = self.cnvs.ClientSize.y / 2
self.inner_radius = min(self.center_x, self.center_y)
self.radius = self.inner_radius + (self.ticksize / 1.5)
self.ticks = []
# Top middle
for i in range(self.num_ticks):
# phi needs to be rotated 90 degrees counter clockwise, otherwise
# 0 degrees will be at the right side of the circle
phi = (self.tau / self.num_ticks * i) - (math.pi / 2)
deg = round(math.degrees(phi))
cos = math.cos(phi)
sin = math.sin(phi)
# Tick start and end point (outer and inner)
ox = self.center_x + self.radius * cos
oy = self.center_y + self.radius * sin
ix = self.center_x + (self.radius - self.ticksize) * cos
iy = self.center_y + (self.radius - self.ticksize) * sin
# Tick label positions
lx = self.center_x + (self.radius + 5) * cos
ly = self.center_y + (self.radius + 5) * sin
label = self.add_label(u"%d°" % (deg + 90),
(lx, ly),
colour=(0.8, 0.8, 0.8),
deg=deg - 90,
flip=True,
align=wx.ALIGN_CENTRE_HORIZONTAL | wx.ALIGN_BOTTOM)
self.ticks.append((ox, oy, ix, iy, label))
self._calculate_display()
if evt:
base.ViewOverlay.on_size(self, evt)
# END Event Handlers
def draw(self, ctx):
# Draw angle lines
ctx.set_line_width(2.5)
ctx.set_source_rgba(0, 0, 0, 0.2 if self.dragging else 0.5)
if self.theta is not None:
# Draw dark underline azimuthal circle
ctx.arc(self.center_x, self.center_y,
self.theta_radius, 0, self.tau)
ctx.stroke()
if self.phi is not None:
# Draw dark underline Phi line
ctx.move_to(self.center_x, self.center_y)
ctx.line_to(*self.phi_line_pos)
ctx.stroke()
# Light selection lines formatting
ctx.set_line_width(2)
ctx.set_dash([3])
if self.dragging:
ctx.set_source_rgba(*self.colour_drag)
else:
ctx.set_source_rgb(*self.colour)
if self.theta is not None:
# Draw azimuthal circle
ctx.arc(self.center_x, self.center_y,
self.theta_radius, 0, self.tau)
ctx.stroke()
self._write_label(ctx, self.theta_label)
if self.phi is not None:
# Draw Phi line
ctx.move_to(self.center_x, self.center_y)
ctx.line_to(*self.phi_line_pos)
ctx.stroke()
self._write_label(ctx, self.phi_label)
ctx.set_dash([])
# ## Draw angle markings ###
# Draw frame that covers everything outside the center circle
ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
ctx.set_source_rgb(0.2, 0.2, 0.2)
ctx.rectangle(0, 0, self.cnvs.ClientSize.x, self.cnvs.ClientSize.y)
ctx.arc(self.center_x, self.center_y, self.inner_radius, 0, self.tau)
# mouse_inside = not ctx.in_fill(float(self.vx or 0), float(self.vy or 0))
ctx.fill()
# Draw Azimuth degree circle
ctx.set_line_width(2)
ctx.set_source_rgb(0.5, 0.5, 0.5)
ctx.arc(self.center_x, self.center_y, self.radius, 0, self.tau)
ctx.stroke()
# Draw Azimuth degree ticks
ctx.set_line_width(1)
for sx, sy, lx, ly, _ in self.ticks:
ctx.move_to(sx, sy)
ctx.line_to(lx, ly)
ctx.stroke()
# Draw tick labels, ignore padding in this case
pad, self.canvas_padding = self.canvas_padding, 0
for _, _, _, _, label in self.ticks:
self._write_label(ctx, label)
self.canvas_padding = pad
if self.intensity_label.text and self.intersection:
ctx.set_source_rgb(*self.colour_highlight)
ctx.arc(self.intersection[0], self.intersection[1], 3, 0, self.tau)
ctx.fill()
x, y = self.intersection
y -= 18
if y < 40:
y += 40
self.intensity_label.pos = (x, y)
self._write_label(ctx, self.intensity_label)
class PointSelectOverlay(base.ViewOverlay):
""" Overlay for the selection of canvas points in view, world and physical coordinates """
def __init__(self, cnvs):
base.ViewOverlay.__init__(self, cnvs)
# Prevent the cursor from resetting on clicks
# Physical position of the last click
self.v_pos = model.VigilantAttribute(None)
self.w_pos = model.VigilantAttribute(None)
self.p_pos = model.VigilantAttribute(None)
# Event Handlers
def on_enter(self, evt):
if self.active:
self.cnvs.set_default_cursor(wx.CROSS_CURSOR)
else:
base.ViewOverlay.on_enter(self, evt)
def on_leave(self, evt):
if self.active:
self.cnvs.reset_default_cursor()
else:
base.ViewOverlay.on_leave(self, evt)
def on_left_down(self, evt):
if not self.active:
base.ViewOverlay.on_left_down(self, evt)
def on_left_up(self, evt):
if self.active:
v_pos = evt.GetPositionTuple()
w_pos = self.cnvs.view_to_world(v_pos, self.cnvs.get_half_buffer_size())
self.v_pos.value = v_pos
self.w_pos.value = w_pos
self.p_pos.value = self.cnvs.world_to_physical_pos(w_pos)
logging.debug("Point selected (view, world, physical): %s, %s, %s)",
self.v_pos.value, self.w_pos.value, self.p_pos.value)
else:
base.ViewOverlay.on_left_up(self, evt)
# END Event Handlers
def draw(self, ctx):
pass
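# Illustrative usage sketch (editor addition, not part of the original file): reacting
# to a picked point via the p_pos VigilantAttribute filled in by on_left_up() above.
# `canvas`, its add_view_overlay() method and setting .active directly are assumptions.
def _attach_point_picker(canvas):
    picker = PointSelectOverlay(canvas)
    picker.p_pos.subscribe(lambda p: logging.info("picked physical position %s", p))
    canvas.add_view_overlay(picker)
    picker.active = True
    return picker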
class HistoryOverlay(base.ViewOverlay):
""" Display rectangles on locations that the microscope was previously positioned at """
def __init__(self, cnvs, history_list_va):
base.ViewOverlay.__init__(self, cnvs)
self.trail_colour = conversion.hex_to_frgb(gui.FG_COLOUR_HIGHLIGHT)
self.pos_colour = conversion.hex_to_frgb(gui.FG_COLOUR_EDIT)
self.fade = True # Fade older positions in the history list
self.history = history_list_va # ListVA of (center, size) tuples
self.history.subscribe(self._on_history_update)
def __str__(self):
return "History (%d): \n" % len(self) + "\n".join([str(h) for h in self.history.value[-5:]])
# # Event Handlers
#
# def on_enter(self, evt):
# base.ViewOverlay.on_enter(self, evt)
# self.cnvs.Refresh()
#
# def on_leave(self, evt):
# base.ViewOverlay.on_leave(self, evt)
# self.cnvs.Refresh()
#
# # END Event Handlers
# TODO: might need rate limiter (but normally stage position is changed rarely)
# TODO: Make the update of the canvas image the responsibility of the viewport
def _on_history_update(self, _):
self.cnvs.update_drawing()
def draw(self, ctx, scale=None, shift=None):
"""
scale (0<float): ratio between the canvas pixel size and the pixel size
of the drawing area. That's a trick to allow drawing both on the
standard view and directly onto the thumbnail.
shift (float, float): offset to add for positioning the drawing, when
it is scaled
"""
ctx.set_line_width(1)
offset = self.cnvs.get_half_buffer_size()
for i, (p_center, p_size) in enumerate(self.history.value):
alpha = (i + 1) * (0.8 / len(self.history.value)) + 0.2 if self.fade else 1.0
v_center = self.cnvs.world_to_view(self.cnvs.physical_to_world_pos(p_center), offset)
if scale:
v_center = (shift[0] + v_center[0] * scale,
shift[1] + v_center[1] * scale)
marker_size = (2, 2)
elif p_size:
marker_size = (int(p_size[0] * self.cnvs.scale),
int(p_size[1] * self.cnvs.scale))
# Prevent the marker from becoming too small
if marker_size[0] < 2 or marker_size[1] < 2:
marker_size = (3, 3)
else:
marker_size = (5, 5)
if i < len(self.history.value) - 1:
colour = self.trail_colour
else:
colour = self.pos_colour
self._draw_rect(ctx, v_center, marker_size, colour, alpha)
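    # Worked example (editor addition): when this view is redrawn into a thumbnail a
    # quarter of the canvas size, the caller would pass scale=0.25 and the thumbnail's
    # top-left offset as shift, so every marker lands at shift + 0.25 * v_center with
    # a fixed 2x2 px size.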
@staticmethod
def _draw_rect(ctx, v_center, v_size, colour, alpha):
ctx.set_source_rgba(0, 0, 0, alpha * 0.4)
x = int(v_center[0] - v_size[0] / 2.0) + 0.5
y = int(v_center[1] - v_size[1] / 2.0) + 0.5
ctx.rectangle(x + 1, y + 1, v_size[0], v_size[1])
ctx.stroke()
ctx.set_source_rgba(colour[0], colour[1], colour[2], alpha)
# Render rectangles of 3 pixels wide
ctx.rectangle(x, y, v_size[0], v_size[1])
ctx.stroke()
class SpotModeOverlay(base.ViewOverlay, base.DragMixin, base.SpotModeBase):
""" Render the spot mode indicator in the center of the view
If a position is provided, the spot will be drawn there.
If the overlay is activated, the user can use the mouse cursor to select a position
"""
def __init__(self, cnvs, spot_va=None):
base.ViewOverlay.__init__(self, cnvs)
base.DragMixin.__init__(self)
base.SpotModeBase.__init__(self, cnvs, spot_va=spot_va)
self.v_pos = None
def on_spot_change(self, _):
self._r_to_v()
def on_size(self, evt):<|fim▁hole|> base.ViewOverlay.on_size(self, evt)
def _v_to_r(self):
if self.v_pos is None:
self.r_pos.value = (0.5, 0.5)
else:
self.r_pos.value = (
float(self.v_pos[0] / self.cnvs.view_width),
float(self.v_pos[1] / self.cnvs.view_height)
)
def _r_to_v(self):
try:
self.v_pos = (
int(self.cnvs.view_width * self.r_pos.value[0]),
int(self.cnvs.view_height * self.r_pos.value[1])
)
except (TypeError, KeyError):
self.v_pos = None
def draw(self, ctx, shift=(0, 0), scale=1.0):
if self.v_pos is None:
return
vx, vy = self.v_pos
base.SpotModeBase.draw(self, ctx, vx, vy)
def activate(self):
self._r_to_v()
base.ViewOverlay.activate(self)
def deactivate(self):
self.v_pos = None
base.ViewOverlay.deactivate(self)<|fim▁end|> | self._r_to_v() |
<|file_name|>promisenativehandler.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::PromiseNativeHandlerBinding;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::bindings::trace::JSTraceable;
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use js::jsapi::{JSContext, HandleValue};
use malloc_size_of::MallocSizeOf;
pub trait Callback: JSTraceable + MallocSizeOf {
fn callback(&self, cx: *mut JSContext, v: HandleValue);
}
#[dom_struct]
pub struct PromiseNativeHandler {
reflector: Reflector,
resolve: Option<Box<Callback>>,
reject: Option<Box<Callback>>,
}
<|fim▁hole|>impl PromiseNativeHandler {
pub fn new(global: &GlobalScope,
resolve: Option<Box<Callback>>,
reject: Option<Box<Callback>>)
-> DomRoot<PromiseNativeHandler> {
reflect_dom_object(Box::new(PromiseNativeHandler {
reflector: Reflector::new(),
resolve: resolve,
reject: reject,
}), global, PromiseNativeHandlerBinding::Wrap)
}
fn callback(callback: &Option<Box<Callback>>, cx: *mut JSContext, v: HandleValue) {
if let Some(ref callback) = *callback {
callback.callback(cx, v)
}
}
pub fn resolved_callback(&self, cx: *mut JSContext, v: HandleValue) {
PromiseNativeHandler::callback(&self.resolve, cx, v)
}
pub fn rejected_callback(&self, cx: *mut JSContext, v: HandleValue) {
PromiseNativeHandler::callback(&self.reject, cx, v)
}
}<|fim▁end|> | |
<|file_name|>DurationConverter.java<|end_file_name|><|fim▁begin|>package org.fastnate.generator.converter;<|fim▁hole|>import java.time.Duration;
import org.fastnate.generator.context.GeneratorContext;
import org.fastnate.generator.statements.ColumnExpression;
import org.fastnate.generator.statements.PrimitiveColumnExpression;
/**
* Converts a {@link Duration} to an SQL expression.
*
* @author Tobias Liefke
*/
public class DurationConverter implements ValueConverter<Duration> {
@Override
public ColumnExpression getExpression(final Duration value, final GeneratorContext context) {
return PrimitiveColumnExpression.create(value.toNanos(), context.getDialect());
}
@Override
public ColumnExpression getExpression(final String defaultValue, final GeneratorContext context) {
return getExpression(Duration.parse(defaultValue), context);
}
}<|fim▁end|> | |
<|file_name|>images.py<|end_file_name|><|fim▁begin|>import os, logging
from PIL import Image
from sqlalchemy.orm.session import object_session
from sqlalchemy.orm.util import identity_key
from iktomi.unstable.utils.image_resizers import ResizeFit
from iktomi.utils import cached_property
from ..files import TransientFile, PersistentFile
from .files import FileEventHandlers, FileProperty
logger = logging.getLogger(__name__)
class ImageFile(PersistentFile):
def _get_properties(self, properties=['width', 'height']):
if 'width' in properties or 'height' in properties:
image = Image.open(self.path)
self.width, self.height = image.size
@cached_property
def width(self):
self._get_properties(['width'])
return self.width
@cached_property
def height(self):
self._get_properties(['height'])
return self.height
class ImageEventHandlers(FileEventHandlers):
def _2persistent(self, target, transient):
# XXX move this method to file_manager
# XXX Do this check or not?
image = Image.open(transient.path)
assert image.format in Image.SAVE and image.format != 'BMP',\
'Unsupported image format'
if self.prop.image_sizes:
session = object_session(target)
persistent_name = getattr(target, self.prop.attribute_name)
pn, ext = os.path.splitext(persistent_name)
image_crop = self.prop.resize(image, self.prop.image_sizes)
if self.prop.force_rgb and image_crop.mode not in ['RGB', 'RGBA']:
image_crop = image_crop.convert('RGB')
if ext == '.gif':
image_crop.format = 'jpeg'
ext = '.jpeg'
if self.prop.enhancements:
for enhance, factor in self.prop.enhancements:
image_crop = enhance(image_crop).enhance(factor)
if self.prop.filter:
image_crop = image_crop.filter(self.prop.filter)
if not ext:
# set extension if it is not set
ext = '.' + image.format.lower()
if pn + ext != persistent_name:
persistent_name = pn + ext
# XXX hack?
setattr(target, self.prop.attribute_name, persistent_name)
image_attr = getattr(target.__class__, self.prop.key)
file_manager = session.find_file_manager(image_attr)
persistent = file_manager.get_persistent(persistent_name,
self.prop.persistent_cls)
transient = session.find_file_manager(image_attr).new_transient(ext)
kw = dict(quality=self.prop.quality)
if self.prop.optimize:
kw = dict(kw, optimize=True)
image_crop.save(transient.path, **kw)
session.find_file_manager(image_attr).store(transient, persistent)
return persistent
else:
# Attention! This method can accept PersistentFile.
# In this case one shold NEVER been deleted or rewritten.
assert isinstance(transient, TransientFile), repr(transient)
return FileEventHandlers._2persistent(self, target, transient)
def before_update(self, mapper, connection, target):
FileEventHandlers.before_update(self, mapper, connection, target)
self._fill_img(mapper, connection, target)
def before_insert(self, mapper, connection, target):
FileEventHandlers.before_insert(self, mapper, connection, target)
self._fill_img(mapper, connection, target)
def _fill_img(self, mapper, connection, target):
if self.prop.fill_from:<|fim▁hole|> base = getattr(target, self.prop.fill_from)
if base is None:
return
if not os.path.isfile(base.path):
logger.warn('Original file is absent %s %s %s',
identity_key(instance=target),
self.prop.fill_from,
base.path)
return
ext = os.path.splitext(base.name)[1]
session = object_session(target)
image_attr = getattr(target.__class__, self.prop.key)
name = session.find_file_manager(image_attr).new_file_name(
self.prop.name_template, target, ext, '')
setattr(target, self.prop.attribute_name, name)
persistent = self._2persistent(target, base)
setattr(target, self.prop.key, persistent)
class ImageProperty(FileProperty):
event_cls = ImageEventHandlers
def _set_options(self, options):
# XXX rename image_sizes?
options = dict(options)
self.image_sizes = options.pop('image_sizes', None)
self.resize = options.pop('resize', None) or ResizeFit()
# XXX implement
self.fill_from = options.pop('fill_from', None)
self.filter = options.pop('filter', None)
self.enhancements = options.pop('enhancements', [])
self.force_rgb = self.enhancements or \
self.filter or \
options.pop('force_rgb', True)
self.quality = options.pop('quality', 85)
self.optimize = options.pop('optimize', False)
assert self.fill_from is None or self.image_sizes is not None
options.setdefault('persistent_cls', ImageFile)
FileProperty._set_options(self, options)<|fim▁end|> | # XXX Looks hacky
value = getattr(target, self.prop.key)
if value is None: |
<|file_name|>test_vis_bbox.py<|end_file_name|><|fim▁begin|>import unittest
import numpy as np
from chainer import testing
from chainercv.utils import generate_random_bbox
from chainercv.visualizations import vis_bbox
try:
import matplotlib # NOQA
_available = True
except ImportError:
_available = False
@testing.parameterize(
*testing.product_dict([
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': None,
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': None,
'label_names': None},
{
'n_bbox': 3, 'label': (0, 1, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 0, 'label': (), 'score': (),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'), 'no_img': True},
{<|fim▁hole|> 'instance_colors': [
(255, 0, 0), (0, 255, 0), (0, 0, 255), (100, 100, 100)]},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisBbox(unittest.TestCase):
def setUp(self):
if hasattr(self, 'no_img'):
self.img = None
else:
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.bbox = generate_random_bbox(
self.n_bbox, (48, 32), 8, 16)
if self.label is not None:
self.label = np.array(self.label, dtype=int)
if self.score is not None:
self.score = np.array(self.score)
if not hasattr(self, 'instance_colors'):
self.instance_colors = None
def test_vis_bbox(self):
ax = vis_bbox(
self.img, self.bbox, self.label, self.score,
label_names=self.label_names,
instance_colors=self.instance_colors,
sort_by_score=self.sort_by_score)
self.assertIsInstance(ax, matplotlib.axes.Axes)
@testing.parameterize(*testing.product_dict([
{
'n_bbox': 3, 'label': (0, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1, 0.75),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 3), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (-1, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisBboxInvalidInputs(unittest.TestCase):
def setUp(self):
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.bbox = np.random.uniform(size=(self.n_bbox, 4))
if self.label is not None:
self.label = np.array(self.label, dtype=int)
if self.score is not None:
self.score = np.array(self.score)
if not hasattr(self, 'instance_colors'):
self.instance_colors = None
def test_vis_bbox_invalid_inputs(self):
with self.assertRaises(ValueError):
vis_bbox(
self.img, self.bbox, self.label, self.score,
label_names=self.label_names,
instance_colors=self.instance_colors,
sort_by_score=self.sort_by_score)
testing.run_module(__name__, __file__)<|fim▁end|> | 'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'), |
<|file_name|>types.py<|end_file_name|><|fim▁begin|>import json
from jaspyx.visitor import BaseVisitor
class Types(BaseVisitor):
def visit_Num(self, node):
self.output(json.dumps(node.n))
<|fim▁hole|> def visit_List(self, node):
self.group(node.elts, prefix='[', infix=', ', suffix=']')
visit_Tuple = visit_List<|fim▁end|> | def visit_Str(self, node):
self.output(json.dumps(node.s))
|
<|file_name|>x86_bmi2_mulx.rs<|end_file_name|><|fim▁begin|>extern crate bitintr;
use bitintr::*;
#[no_mangle]
pub fn umulx_u32(x: u32, y: u32) -> (u32, u32) {
x.mulx(y)
}
#[no_mangle]
pub fn umulx_u64(x: u64, y: u64) -> (u64, u64) {
x.mulx(y)<|fim▁hole|><|fim▁end|> | } |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>"""
Python implementation of the matrix information measurement examples from the
StackExchange answer written by WilliamAHuber for
"Measuring entropy/ information/ patterns of a 2d binary matrix"
http://stats.stackexchange.com/a/17556/43909
Copyright 2014 Cosmo Harrigan
This program is free software, distributed under the terms of the GNU LGPL v3.0
"""
__author__ = 'Cosmo Harrigan'
from matplotlib import pyplot
from neighborhood_functions import avg_components
from moving_window_filter import moving_window_filter
from calculate_profile import profile
# Function to apply
F = avg_components
# Define the matrices as input_matrices
from data import *
# Iterate over the input matrices
for m in range(0, len(input_matrices)):
active_matrix = input_matrices[m]
print("---------\nMatrix #{0}\n---------\n".format(m))<|fim▁hole|> # Produce the filtered matrices at varying scales and the associated
# entropy "profiles"
matrices = []
for n in range(1, min(active_matrix.shape)):
output_matrix = moving_window_filter(matrix=active_matrix,
f=F,
neighborhood_size=n)
matrices.append(output_matrix)
subplot = pyplot.subplot(5, 4, m * 4 + n)
pyplot.axis('off')
pyplot.imshow(output_matrix,
interpolation='nearest',
cmap='Greys_r',
vmin=0,
vmax=1)
print("Neighborhood size = {0}\n{1}\n".format(n, output_matrix))
print("Profile:\n{0}\n".format(profile(matrices)))
pyplot.show()<|fim▁end|> | |
<|file_name|>ExampleUnitTest.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2017 Martin Pfeffer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.celox.androiddemos.jsonlayout;
import org.junit.Test;<|fim▁hole|>
/**
* Example local unit test, which will execute on the development machine (host).
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
public class ExampleUnitTest {
@Test
public void addition_isCorrect() throws Exception {
assertEquals(4, 2 + 2);
}
}<|fim▁end|> |
import static org.junit.Assert.*; |
<|file_name|>MD5.java<|end_file_name|><|fim▁begin|>package com.naosim.rtm.lib;
import java.math.BigInteger;
import java.security.MessageDigest;
public class MD5 {
public static String md5(String str) {
try {
byte[] str_bytes = str.getBytes("UTF-8");
MessageDigest md = MessageDigest.getInstance("MD5");<|fim▁hole|> }catch(Exception e){
throw new RuntimeException(e);
}
}
}<|fim▁end|> | byte[] md5_bytes = md.digest(str_bytes);
BigInteger big_int = new BigInteger(1, md5_bytes);
// pad to 32 hex digits so digests with leading zero bytes keep their full length
return String.format("%032x", big_int);
<|file_name|>ex16-3.py<|end_file_name|><|fim▁begin|>from sys import argv
script, filename = argv
print "We're going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..."
target = open(filename,'w')
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
line1 = raw_input("line 1:")
line2 = raw_input("line 2:")
line3 = raw_input("line 3:")
print "I'm going to write these to the file."
<|fim▁hole|>
print "And finally, we close it."
target.close()<|fim▁end|> | content = "%s\n%s\n%s\n" % (line1, line2, line3)
target.write(content) |
<|file_name|>stata.py<|end_file_name|><|fim▁begin|>import pandas as pd
from larray.core.array import Array
from larray.inout.pandas import from_frame
__all__ = ['read_stata']
def read_stata(filepath_or_buffer, index_col=None, sort_rows=False, sort_columns=False, **kwargs) -> Array:
r"""
Reads Stata .dta file and returns an Array with the contents
Parameters
----------
filepath_or_buffer : str or file-like object
Path to .dta file or a file handle.
index_col : str or None, optional
Name of column to set as index. Defaults to None.
sort_rows : bool, optional
Whether or not to sort the rows alphabetically (sorting is more efficient than not sorting).
This only makes sense in combination with index_col. Defaults to False.
sort_columns : bool, optional
Whether or not to sort the columns alphabetically (sorting is more efficient than not sorting).
Defaults to False.
Returns
-------
Array
See Also
--------
Array.to_stata
Notes
-----
The round trip to Stata (Array.to_stata followed by read_stata) loses the name of the "column" axis.
Examples
--------
>>> read_stata('test.dta') # doctest: +SKIP<|fim▁hole|> >>> read_stata('test.dta', index_col='row') # doctest: +SKIP
row\{1} country sex
0 BE F
1 FR M
2 FR F
"""
df = pd.read_stata(filepath_or_buffer, index_col=index_col, **kwargs)
return from_frame(df, sort_rows=sort_rows, sort_columns=sort_columns)<|fim▁end|> | {0}\{1} row country sex
0 0 BE F
1 1 FR M
2 2 FR F |
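A usage sketch for read_stata; 'people.dta' and the 'row' column are placeholder names, not files shipped with larray.

from larray.inout.stata import read_stata

arr = read_stata('people.dta')                                    # whole file as an Array
arr_by_row = read_stata('people.dta', index_col='row', sort_rows=True)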
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(std_misc)]
use std::dynamic_lib::DynamicLibrary;
use std::path::Path;
pub fn main() {
unsafe {
let path = Path::new("libdylib.so");
let a = DynamicLibrary::open(Some(&path)).unwrap();
assert!(a.symbol::<isize>("fun1").is_ok());
assert!(a.symbol::<isize>("fun2").is_err());
assert!(a.symbol::<isize>("fun3").is_err());<|fim▁hole|>}<|fim▁end|> | assert!(a.symbol::<isize>("fun4").is_ok());
assert!(a.symbol::<isize>("fun5").is_ok());
} |
<|file_name|>recipe-59892.py<|end_file_name|><|fim▁begin|># Ensure variable is defined
try:
x
except NameError:
x = None<|fim▁hole|>if x is None:
some_fallback_operation()
else:
some_operation(x)<|fim▁end|> |
# Test whether variable is defined to be None |
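An equivalent sketch that probes the namespace instead of catching NameError; the two operation names are stand-ins, as in the recipe itself.

def some_operation(value):
    print("using", value)

def some_fallback_operation():
    print("x was undefined or None")

x = globals().get('x')          # None if the name was never bound
if x is None:
    some_fallback_operation()
else:
    some_operation(x)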
<|file_name|>postMessage.js<|end_file_name|><|fim▁begin|>/*
Module handles message posting.
*/
'use strict';
const AWS = require('aws-sdk'),
Joi = require('joi'),
config = require('../environments/config'),
dynamodb = new AWS.DynamoDB({ region: 'us-east-1' });
/* Joi validation object */
const postMessageValidate = Joi.object().keys({
UserName: Joi
.string()
.not('')
.default('Anonymous')
.description('Name of user who posts the message.'),
MessageBody: Joi
.string()
.not('')
.description('The message content.'),
Extra: Joi
.string()
.default('')
.description('Any additional info about the message.')
}).requiredKeys(
'UserName', 'MessageBody'
);
<|fim▁hole|> let item = {
'UserName': {
S: query.UserName
},
'TimeStamp': {
S: curTime.toString()
},
'MessageBody': {
S: query.MessageBody
}
};
if(query.Extra && query.Extra !== ''){
item['Extra'] = {
S: query.Extra
};
}
return {
TableName: config.dynamodb,
Item: item
};
};
/* Handler for postMessage */
const postMessage = (req, resp) => {
let query = req.query,
params = postMessageParams(query);
dynamodb.putItem(params, (err, data) => {
if(err){
console.log('ERR: ' + err);
resp({Error: err}).code(400);
} else {
resp({meta: {status: "Success"}, data: data}).code(200);
}
});
};
module.exports = {
postMessageParams,
config: {
handler: postMessage,
description: 'Allow user to post a message.',
tags: ['api'],
validate: {
query: postMessageValidate
}
}
};<|fim▁end|> | /* params for postMessage */
const postMessageParams = (query) => {
let curTime = (new Date()).getTime(); |
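A rough boto3 equivalent of the putItem parameter shape built above; the 'Messages' table name and region are placeholders standing in for config.dynamodb.

import time
import boto3

dynamodb = boto3.client('dynamodb', region_name='us-east-1')

def post_message(user_name, message_body, extra=''):
    item = {
        'UserName': {'S': user_name},
        'TimeStamp': {'S': str(int(time.time() * 1000))},  # milliseconds, like Date().getTime()
        'MessageBody': {'S': message_body},
    }
    if extra:
        item['Extra'] = {'S': extra}
    return dynamodb.put_item(TableName='Messages', Item=item)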
<|file_name|>CasKryoTranscoderTests.java<|end_file_name|><|fim▁begin|>package org.apereo.cas.memcached.kryo;
import org.apereo.cas.authentication.AcceptUsersAuthenticationHandler;
import org.apereo.cas.authentication.AuthenticationBuilder;
import org.apereo.cas.authentication.BasicCredentialMetaData;
import org.apereo.cas.authentication.DefaultAuthenticationBuilder;
import org.apereo.cas.authentication.DefaultAuthenticationHandlerExecutionResult;
import org.apereo.cas.authentication.UsernamePasswordCredential;
import org.apereo.cas.authentication.principal.DefaultPrincipalFactory;
import org.apereo.cas.mock.MockServiceTicket;
import org.apereo.cas.mock.MockTicketGrantingTicket;
import org.apereo.cas.services.RegisteredServiceTestUtils;
import org.apereo.cas.ticket.TicketGrantingTicket;
import org.apereo.cas.ticket.TicketGrantingTicketImpl;
import org.apereo.cas.ticket.support.MultiTimeUseOrTimeoutExpirationPolicy;
import org.apereo.cas.ticket.support.NeverExpiresExpirationPolicy;
import com.esotericsoftware.kryo.KryoException;
import lombok.extern.slf4j.Slf4j;
import lombok.val;
import org.junit.Test;
import javax.security.auth.login.AccountNotFoundException;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import static org.junit.Assert.*;
/**
* Unit test for {@link CasKryoTranscoder} class.
*
* @author Marvin S. Addison
* @since 3.0.0
*/
@Slf4j
public class CasKryoTranscoderTests {
private static final String ST_ID = "ST-1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890ABCDEFGHIJK";
private static final String TGT_ID = "TGT-1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890ABCDEFGHIJK-cas1";
private static final String USERNAME = "handymanbob";
private static final String PASSWORD = "foo";
private static final String NICKNAME_KEY = "nickname";
private static final String NICKNAME_VALUE = "bob";
private final CasKryoTranscoder transcoder;
private final Map<String, Object> principalAttributes;
public CasKryoTranscoderTests() {
val classesToRegister = new ArrayList<Class>();
classesToRegister.add(MockServiceTicket.class);
classesToRegister.add(MockTicketGrantingTicket.class);
this.transcoder = new CasKryoTranscoder(new CasKryoPool(classesToRegister));
this.principalAttributes = new HashMap<>();
this.principalAttributes.put(NICKNAME_KEY, NICKNAME_VALUE);
}
@Test
public void verifyEncodeDecodeTGTImpl() {
val userPassCredential = new UsernamePasswordCredential(USERNAME, PASSWORD);
final AuthenticationBuilder bldr = new DefaultAuthenticationBuilder(new DefaultPrincipalFactory()
.createPrincipal("user", new HashMap<>(this.principalAttributes)));
bldr.setAttributes(new HashMap<>(this.principalAttributes));
bldr.setAuthenticationDate(ZonedDateTime.now());
bldr.addCredential(new BasicCredentialMetaData(userPassCredential));
bldr.addFailure("error", new AccountNotFoundException());
bldr.addSuccess("authn", new DefaultAuthenticationHandlerExecutionResult(
new AcceptUsersAuthenticationHandler(""),
new BasicCredentialMetaData(userPassCredential)));
final TicketGrantingTicket expectedTGT = new TicketGrantingTicketImpl(TGT_ID,
RegisteredServiceTestUtils.getService(),
null, bldr.build(),
new NeverExpiresExpirationPolicy());
val ticket = expectedTGT.grantServiceTicket(ST_ID,
RegisteredServiceTestUtils.getService(),
new NeverExpiresExpirationPolicy(), false, true);
val result1 = transcoder.encode(expectedTGT);
val resultTicket = transcoder.decode(result1);
assertEquals(expectedTGT, resultTicket);
val result2 = transcoder.encode(ticket);
val resultStTicket1 = transcoder.decode(result2);
assertEquals(ticket, resultStTicket1);
val resultStTicket2 = transcoder.decode(result2);
assertEquals(ticket, resultStTicket2);
}
@Test
public void verifyEncodeDecode() {
val tgt = new MockTicketGrantingTicket(USERNAME);
val expectedST = new MockServiceTicket(ST_ID, RegisteredServiceTestUtils.getService(), tgt);
assertEquals(expectedST, transcoder.decode(transcoder.encode(expectedST)));
val expectedTGT = new MockTicketGrantingTicket(USERNAME);
expectedTGT.grantServiceTicket(ST_ID, null, null, false, true);
val result = transcoder.encode(expectedTGT);
assertEquals(expectedTGT, transcoder.decode(result));
assertEquals(expectedTGT, transcoder.decode(result));
internalProxyTest();
}
private void internalProxyTest() {
val expectedTGT = new MockTicketGrantingTicket(USERNAME);
expectedTGT.grantServiceTicket(ST_ID, null, null, false, true);
val result = transcoder.encode(expectedTGT);
assertEquals(expectedTGT, transcoder.decode(result));
assertEquals(expectedTGT, transcoder.decode(result));
}
@Test
public void verifyEncodeDecodeTGTWithUnmodifiableMap() {
val userPassCredential = new UsernamePasswordCredential(USERNAME, PASSWORD);
final TicketGrantingTicket expectedTGT =
new MockTicketGrantingTicket(TGT_ID, userPassCredential, new HashMap<>(this.principalAttributes));
expectedTGT.grantServiceTicket(ST_ID, null, null, false, true);
val result = transcoder.encode(expectedTGT);
assertEquals(expectedTGT, transcoder.decode(result));
assertEquals(expectedTGT, transcoder.decode(result));
}
@Test
public void verifyEncodeDecodeTGTWithUnmodifiableList() {
val userPassCredential = new UsernamePasswordCredential(USERNAME, PASSWORD);
val values = new ArrayList<String>();
values.add(NICKNAME_VALUE);
val newAttributes = new HashMap<String, Object>();
newAttributes.put(NICKNAME_KEY, new ArrayList<>(values));
val expectedTGT = new MockTicketGrantingTicket(TGT_ID, userPassCredential, newAttributes);
expectedTGT.grantServiceTicket(ST_ID, null, null, false, true);
val result = transcoder.encode(expectedTGT);
assertEquals(expectedTGT, transcoder.decode(result));
assertEquals(expectedTGT, transcoder.decode(result));
}
@Test
public void verifyEncodeDecodeTGTWithLinkedHashMap() {
val userPassCredential = new UsernamePasswordCredential(USERNAME, PASSWORD);
final TicketGrantingTicket expectedTGT =
new MockTicketGrantingTicket(TGT_ID, userPassCredential, new LinkedHashMap<>(this.principalAttributes));
expectedTGT.grantServiceTicket(ST_ID, null, null, false, true);
val result = transcoder.encode(expectedTGT);
assertEquals(expectedTGT, transcoder.decode(result));
assertEquals(expectedTGT, transcoder.decode(result));
}
@Test
public void verifyEncodeDecodeTGTWithListOrderedMap() {
val userPassCredential = new UsernamePasswordCredential(USERNAME, PASSWORD);
final TicketGrantingTicket expectedTGT =
new MockTicketGrantingTicket(TGT_ID, userPassCredential, this.principalAttributes);
expectedTGT.grantServiceTicket(ST_ID, null, null, false, true);
val result = transcoder.encode(expectedTGT);
assertEquals(expectedTGT, transcoder.decode(result));
assertEquals(expectedTGT, transcoder.decode(result));
}
@Test
public void verifyEncodeDecodeTGTWithUnmodifiableSet() {
val newAttributes = new HashMap<String, Object>();
val values = new HashSet<String>();
values.add(NICKNAME_VALUE);
//CHECKSTYLE:OFF
newAttributes.put(NICKNAME_KEY, Collections.unmodifiableSet(values));
//CHECKSTYLE:ON
val userPassCredential = new UsernamePasswordCredential(USERNAME, PASSWORD);
val expectedTGT = new MockTicketGrantingTicket(TGT_ID, userPassCredential, newAttributes);
expectedTGT.grantServiceTicket(ST_ID, null, null, false, true);
val result = transcoder.encode(expectedTGT);
assertEquals(expectedTGT, transcoder.decode(result));
assertEquals(expectedTGT, transcoder.decode(result));
}
@Test
public void verifyEncodeDecodeTGTWithSingleton() {
val newAttributes = new HashMap<String, Object>();
newAttributes.put(NICKNAME_KEY, Collections.singleton(NICKNAME_VALUE));
val userPassCredential = new UsernamePasswordCredential(USERNAME, PASSWORD);
val expectedTGT = new MockTicketGrantingTicket(TGT_ID, userPassCredential, newAttributes);
expectedTGT.grantServiceTicket(ST_ID, null, null, false, true);
val result = transcoder.encode(expectedTGT);
assertEquals(expectedTGT, transcoder.decode(result));
assertEquals(expectedTGT, transcoder.decode(result));
}
@Test
public void verifyEncodeDecodeTGTWithSingletonMap() {
val newAttributes = Collections.<String, Object>singletonMap(NICKNAME_KEY, NICKNAME_VALUE);
val userPassCredential = new UsernamePasswordCredential(USERNAME, PASSWORD);
val expectedTGT = new MockTicketGrantingTicket(TGT_ID, userPassCredential, newAttributes);
expectedTGT.grantServiceTicket(ST_ID, null, null, false, true);
val result = transcoder.encode(expectedTGT);
assertEquals(expectedTGT, transcoder.decode(result));
assertEquals(expectedTGT, transcoder.decode(result));
}
@Test
public void verifyEncodeDecodeRegisteredService() {
val service = RegisteredServiceTestUtils.getRegisteredService("helloworld");
val result = transcoder.encode(service);
assertEquals(service, transcoder.decode(result));
assertEquals(service, transcoder.decode(result));
}
@Test
public void verifySTWithServiceTicketExpirationPolicy() {<|fim▁hole|> transcoder.getKryo().getClassResolver().reset();
val tgt = new MockTicketGrantingTicket(USERNAME);
val expectedST = new MockServiceTicket(ST_ID, RegisteredServiceTestUtils.getService(), tgt);
val step
= new MultiTimeUseOrTimeoutExpirationPolicy.ServiceTicketExpirationPolicy(1, 600);
expectedST.setExpiration(step);
val result = transcoder.encode(expectedST);
assertEquals(expectedST, transcoder.decode(result));
// Test it a second time - Ensure there's no problem with subsequent de-serializations.
assertEquals(expectedST, transcoder.decode(result));
}
@Test
public void verifyEncodeDecodeNonRegisteredClass() {
val tgt = new MockTicketGrantingTicket(USERNAME);
val expectedST = new MockServiceTicket(ST_ID, RegisteredServiceTestUtils.getService(), tgt);
// This class is not registered with Kryo
val step = new UnregisteredServiceTicketExpirationPolicy(1, 600);
expectedST.setExpiration(step);
try {
transcoder.encode(expectedST);
throw new AssertionError("Unregistered class is not allowed by Kryo");
} catch (final KryoException e) {
LOGGER.trace(e.getMessage(), e);
} catch (final Exception e) {
throw new AssertionError("Unexpected exception due to not resetting Kryo between de-serializations with unregistered class.");
}
}
/**
* Class for testing Kryo unregistered class handling.
*/
private static class UnregisteredServiceTicketExpirationPolicy extends MultiTimeUseOrTimeoutExpirationPolicy {
private static final long serialVersionUID = -1704993954986738308L;
/**
* Instantiates a new Service ticket expiration policy.
*
* @param numberOfUses the number of uses
* @param timeToKillInSeconds the time to kill in seconds
*/
UnregisteredServiceTicketExpirationPolicy(final int numberOfUses, final long timeToKillInSeconds) {
super(numberOfUses, timeToKillInSeconds);
}
}
}<|fim▁end|> | // ServiceTicketExpirationPolicy is not registered with Kryo... |
<|file_name|>setup.js<|end_file_name|><|fim▁begin|>/**
* Pre Tests
* Check to make sure jQuery and Zest are loaded
*/
module("Setup");
test("jQuery is loaded", function() {
expect(3);
ok( jQuery,
"jQuery is defined." );
ok( $,
"$ is defined.");
equal( typeof jQuery, "function",
"jQuery is a function." );
});
test("Zest is loaded", function() {
expect(3);
ok( Zest,
"Zest is defined." );
ok( Z$,
"Z$ is defined." );
equal( typeof Zest, "function",<|fim▁hole|><|fim▁end|> | "Zest is a function." );
}); |
<|file_name|>compiler.py<|end_file_name|><|fim▁begin|># pylint: disable=W0401<|fim▁hole|><|fim▁end|> | from django.db.backends.oracle.compiler import * |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):<|fim▁hole|><|fim▁end|> | user = models.OneToOneField(User)
description = models.TextField(blank=True, null=True) |
<|file_name|>common.cpp<|end_file_name|><|fim▁begin|>/*
読み太(yomita), a USI shogi (Japanese chess) playing engine derived from
Stockfish 7 & YaneuraOu mid 2016 V3.57
Copyright (C) 2004-2008 Tord Romstad (Glaurung author)
Copyright (C) 2008-2015 Marco Costalba, Joona Kiiski, Tord Romstad (Stockfish author)
Copyright (C) 2015-2016 Marco Costalba, Joona Kiiski, Gary Linscott, Tord Romstad (Stockfish author)
Copyright (C) 2015-2016 Motohiro Isozaki(YaneuraOu author)
Copyright (C) 2016-2017 Ryuzo Tukamoto
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _CRT_SECURE_NO_WARNINGS 1
#ifdef _WIN32
#if _WIN32_WINNT < 0x0601
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0601
#endif
#ifdef _MSC_VER
#include <windows.h>
#endif
extern "C"
{
typedef bool(*fun1_t)(LOGICAL_PROCESSOR_RELATIONSHIP,
PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX, PDWORD);
typedef bool(*fun2_t)(USHORT, PGROUP_AFFINITY);
typedef bool(*fun3_t)(HANDLE, CONST GROUP_AFFINITY*, PGROUP_AFFINITY);
}
#endif
#include <vector>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <iostream>
#include <codecvt>
#ifndef _MSC_VER
#include <sys/stat.h>
#include <sys/types.h>
#endif
#include "common.h"
#include "thread.h"
using namespace std;
namespace WinProcGroup {
#ifndef _MSC_VER
void bindThisThread(size_t) {};
#else<|fim▁hole|> {
int threads = 0;
int nodes = 0;
int cores = 0;
DWORD return_length = 0;
DWORD byte_offset = 0;
HMODULE k32 = GetModuleHandle(TEXT("Kernel32.dll"));
auto fun1 = (fun1_t)GetProcAddress(k32, "GetLogicalProcessorInformationEx");
if (!fun1)
return -1;
if (fun1(RelationAll, nullptr, &return_length))
return -1;
SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *buffer, *ptr;
ptr = buffer = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)malloc(return_length);
if (!fun1(RelationAll, buffer, &return_length))
{
free(buffer);
return -1;
}
while (ptr->Size > 0 && byte_offset + ptr->Size <= return_length)
{
if (ptr->Relationship == RelationNumaNode)
nodes++;
else if (ptr->Relationship == RelationProcessorCore)
{
cores++;
threads += (ptr->Processor.Flags == LTP_PC_SMT) ? 2 : 1;
}
byte_offset += ptr->Size;
ptr = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)(((char*)ptr) + ptr->Size);
}
free(buffer);
std::vector<int> groups;
for (int n = 0; n < nodes; n++)
for (int i = 0; i < cores / nodes; i++)
groups.push_back(n);
for (int t = 0; t < threads - cores; t++)
groups.push_back(t % nodes);
return idx < groups.size() ? groups[idx] : -1;
}
void bindThisThread(size_t idx)
{
if (Threads.size() < 8)
return;
int group = getGroup(idx);
if (group == -1)
return;
HMODULE k32 = GetModuleHandle(TEXT("Kernel32.dll"));
auto fun2 = (fun2_t)GetProcAddress(k32, "GetNumaNodeProcessorMaskEx");
auto fun3 = (fun3_t)GetProcAddress(k32, "SetThreadGroupAffinity");
if (!fun2 || !fun3)
return;
GROUP_AFFINITY affinity;
if (fun2(group, &affinity))
fun3(GetCurrentThread(), &affinity, nullptr);
}
#endif
} // namespace WinProcGroup
// Hack for logging. By hooking streambuf with this, input from cin and output to cout
// can be redirected to a file as usual, without any additional code.
// cf. http://groups.google.com/group/comp.lang.c++/msg/1d941c0f26ea0d81
struct Tie : public streambuf
{
Tie(streambuf* buf_, streambuf* log_) : buf(buf_), log(log_) {}
int sync() { return log->pubsync(), buf->pubsync(); }
int overflow(int c) { return write(buf->sputc((char)c), "<< "); }
int underflow() { return buf->sgetc(); }
int uflow() { return write(buf->sbumpc(), ">> "); }
int write(int c, const char* prefix) {
static int last = '\n';
if (last == '\n')
log->sputn(prefix, 3);
return last = log->sputc((char)c);
}
streambuf *buf, *log; // standard stream, log file
};
struct Logger
{
static void start(bool b)
{
static Logger log;
if (b && !log.file.is_open())
{
log.file.open("io_log.txt", ifstream::out);
cin.rdbuf(&log.in);
cout.rdbuf(&log.out);
cout << "start logger" << endl;
}
else if (!b && log.file.is_open())
{
cout << "end logger" << endl;
cout.rdbuf(log.out.buf);
cin.rdbuf(log.in.buf);
log.file.close();
}
}
private:
Tie in, out; // ties stdin to the file and stdout to the file
ofstream file; // file the log is written to
Logger() : in(cin.rdbuf(), file.rdbuf()), out(cout.rdbuf(), file.rdbuf()) {}
~Logger() { start(false); }
};
void startLogger(bool b) { Logger::start(b); }
// Reads the whole file. A missing file is not an error. Blank lines are skipped.
int readAllLines(std::string filename, std::vector<std::string>& lines)
{
fstream fs(filename, ios::in);
if (fs.fail())
return 1; // read failure
while (!fs.fail() && !fs.eof())
{
std::string line;
getline(fs, line);
if (line.length())
lines.push_back(line);
}
fs.close();
return 0;
}
// Returns a string representing the current date, day of week, and time.
std::string localTime()
{
auto now = std::chrono::system_clock::now();
auto tp = std::chrono::system_clock::to_time_t(now);
return std::ctime(&tp);
}
// Current time, down to the second, in YYYYMMDDhhmmss form.
std::string timeStamp()
{
char buff[20] = "";
time_t now = time(NULL);
struct tm *pnow = localtime(&now);
sprintf(buff, "%04d%02d%02d%02d%02d%02d", pnow->tm_year + 1900, pnow->tm_mon + 1, pnow->tm_mday,
pnow->tm_hour, pnow->tm_min, pnow->tm_sec);
return std::string(buff);
}
std::string path(const std::string& folder, const std::string& filename)
{
if (folder.length() >= 1 && *folder.rbegin() != '/' && *folder.rbegin() != '\\')
return folder + "/" + filename;
return folder + filename;
}
void _mkdir(std::string dir)
{
#ifdef _MSC_VER
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> cv;
if (_wmkdir(cv.from_bytes(dir).c_str()) == -1)
#else
#ifdef _WIN32
if (mkdir(dir.c_str()) == -1)
#else
if (mkdir(dir.c_str(),
S_IRUSR | S_IWUSR | S_IXUSR |
S_IRGRP | S_IWGRP | S_IXGRP |
S_IROTH | S_IWOTH | S_IXOTH ) == -1)
#endif
#endif
{
if (errno == EEXIST)
std::cout << "ディレクトリは dirname が既存のファイル、ディレクトリ、またはデバイスの名前であるため生成されませんでした。" << std::endl;
else if (errno == ENOENT)
std::cout << "パスが見つかりませんでした。" << std::endl;
}
}<|fim▁end|> | int getGroup(size_t idx) |
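The same tee-style logging trick sketched in Python: wrap the standard stream so every chunk also lands in a log file with a direction prefix (TeeStream is an illustrative name; io_log.txt matches the file used above).

import sys

class TeeStream:
    def __init__(self, stream, log_file, prefix):
        self.stream, self.log, self.prefix = stream, log_file, prefix

    def write(self, text):
        self.stream.write(text)
        self.log.write(self.prefix + text)  # crude: prefixes each chunk rather than each line

    def flush(self):
        self.stream.flush()
        self.log.flush()

log = open("io_log.txt", "w")
sys.stdout = TeeStream(sys.stdout, log, "<< ")  # analogous to hooking cout's streambuf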
<|file_name|>Prereq.py<|end_file_name|><|fim▁begin|>from Database.Controllers.Disciplina import Disciplina
class Prereq(object):
def __init__(self,dados=None):
if dados is not None:
self.id = dados ['id']
self.grupo = dados ['grupo']
self.id_disc_pre = dados ['id_disc_pre']
def getId(self):
return self.id
def setGrupo(self,grupo):
self.grupo = grupo
def getGrupo(self):<|fim▁hole|> self.id_disc_pre = (Disciplina().pegarDisciplina('nome = %s',(disc_pre,))).getId()
def getId_disc_pre(self):
return self.id_disc_pre
def getDisc_pre(self):
return (Disciplina().pegarDisciplina('id = %s',(self.id_disc_pre,))).getNome()<|fim▁end|> | return self.grupo
def setId_disc_pre(self,disc_pre): |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | default_app_config = 'hs_tracking.apps.HSTrackingAppConfig' |
<|file_name|>test_api_task_import.py<|end_file_name|><|fim▁begin|>import os
import time
import io
import requests
from django.contrib.auth.models import User
from guardian.shortcuts import remove_perm, assign_perm
from rest_framework import status
from rest_framework.test import APIClient
import worker
from app.cogeo import valid_cogeo
from app.models import Project
from app.models import Task
from app.tests.classes import BootTransactionTestCase
from app.tests.utils import clear_test_media_root, start_processing_node
from nodeodm import status_codes
from nodeodm.models import ProcessingNode
from webodm import settings<|fim▁hole|>class TestApiTask(BootTransactionTestCase):
def setUp(self):
super().setUp()
clear_test_media_root()
def test_task(self):
client = APIClient()
with start_processing_node():
user = User.objects.get(username="testuser")
self.assertFalse(user.is_superuser)
project = Project.objects.create(
owner=user,
name="test project"
)
image1 = open("app/fixtures/tiny_drone_image.jpg", 'rb')
image2 = open("app/fixtures/tiny_drone_image_2.jpg", 'rb')
# Create processing node
pnode = ProcessingNode.objects.create(hostname="localhost", port=11223)
client.login(username="testuser", password="test1234")
# Create task
res = client.post("/api/projects/{}/tasks/".format(project.id), {
'images': [image1, image2]
}, format="multipart")
image1.close()
image2.close()
task = Task.objects.get(id=res.data['id'])
# Wait for completion
c = 0
while c < 10:
worker.tasks.process_pending_tasks()
task.refresh_from_db()
if task.status == status_codes.COMPLETED:
break
c += 1
time.sleep(1)
self.assertEqual(task.status, status_codes.COMPLETED)
# Download task assets
task_uuid = task.uuid
res = client.get("/api/projects/{}/tasks/{}/download/all.zip".format(project.id, task.id))
self.assertEqual(res.status_code, status.HTTP_200_OK)
if not os.path.exists(settings.MEDIA_TMP):
os.mkdir(settings.MEDIA_TMP)
assets_path = os.path.join(settings.MEDIA_TMP, "all.zip")
with open(assets_path, 'wb') as f:
f.write(res.content)
remove_perm('change_project', user, project)
assets_file = open(assets_path, 'rb')
# Cannot import unless we have permission
res = client.post("/api/projects/{}/tasks/import".format(project.id), {
'file': [assets_file]
}, format="multipart")
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
assign_perm('change_project', user, project)
# Import with file upload method
assets_file.seek(0)
res = client.post("/api/projects/{}/tasks/import".format(project.id), {
'file': [assets_file]
}, format="multipart")
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
assets_file.close()
file_import_task = Task.objects.get(id=res.data['id'])
# Wait for completion
c = 0
while c < 10:
worker.tasks.process_pending_tasks()
file_import_task.refresh_from_db()
if file_import_task.status == status_codes.COMPLETED:
break
c += 1
time.sleep(1)
self.assertEqual(file_import_task.import_url, "file://all.zip")
self.assertEqual(file_import_task.images_count, 1)
self.assertEqual(file_import_task.processing_node, None)
self.assertEqual(file_import_task.auto_processing_node, False)
# Can access assets
res = client.get("/api/projects/{}/tasks/{}/assets/odm_orthophoto/odm_orthophoto.tif".format(project.id, file_import_task.id))
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertTrue(valid_cogeo(file_import_task.assets_path(task.ASSETS_MAP["orthophoto.tif"])))
self.assertTrue(valid_cogeo(file_import_task.assets_path(task.ASSETS_MAP["dsm.tif"])))
self.assertTrue(valid_cogeo(file_import_task.assets_path(task.ASSETS_MAP["dtm.tif"])))
# Set task public so we can download from it without auth
file_import_task.public = True
file_import_task.save()
# Import with URL method
assets_import_url = "http://{}:{}/task/{}/download/all.zip".format(pnode.hostname, pnode.port, task_uuid)
res = client.post("/api/projects/{}/tasks/import".format(project.id), {
'url': assets_import_url
})
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
url_task = Task.objects.get(id=res.data['id'])
# Wait for completion
c = 0
while c < 10:
worker.tasks.process_pending_tasks()
url_task.refresh_from_db()
if url_task.status == status_codes.COMPLETED:
break
c += 1
time.sleep(1)
self.assertEqual(url_task.import_url, assets_import_url)
self.assertEqual(url_task.images_count, 1)
# Import corrupted file
assets_import_url = "http://{}:{}/task/{}/download/orthophoto.tif".format(pnode.hostname, pnode.port, task_uuid)
res = client.post("/api/projects/{}/tasks/import".format(project.id), {
'url': assets_import_url
})
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
corrupted_task = Task.objects.get(id=res.data['id'])
# Wait for completion
c = 0
while c < 10:
worker.tasks.process_pending_tasks()
corrupted_task.refresh_from_db()
if corrupted_task.status == status_codes.FAILED:
break
c += 1
time.sleep(1)
self.assertEqual(corrupted_task.status, status_codes.FAILED)
self.assertTrue("Invalid" in corrupted_task.last_error)<|fim▁end|> | |
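The tests above repeat the same poll-until-status loop; a helper along these lines (names are illustrative, not part of WebODM) would express it once.

import time
import worker  # the same app-level module the test imports above

def wait_for_status(task, wanted_status, attempts=10, delay=1):
    # Re-run the pending-task worker until the task reaches wanted_status or we give up.
    for _ in range(attempts):
        worker.tasks.process_pending_tasks()
        task.refresh_from_db()
        if task.status == wanted_status:
            return True
        time.sleep(delay)
    return False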
<|file_name|>StreamableRDDTest_Failures.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2012 - 2020 Splice Machine, Inc.
*
* This file is part of Splice Machine.
* Splice Machine is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either
* version 3, or (at your option) any later version.
* Splice Machine is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License along with Splice Machine.
* If not, see <http://www.gnu.org/licenses/>.
*/
package com.splicemachine.stream;
import splice.com.google.common.net.HostAndPort;
import com.splicemachine.db.iapi.error.StandardException;
import com.splicemachine.db.iapi.sql.execute.ExecRow;
import com.splicemachine.derby.impl.SpliceSpark;
import com.splicemachine.derby.stream.BaseStreamTest;
import org.apache.commons.collections.IteratorUtils;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import scala.Tuple2;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.io.Serializable;
import java.io.UnsupportedEncodingException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import static org.junit.Assert.*;
/**
* Created by dgomezferro on 6/1/16.
*/
public class StreamableRDDTest_Failures extends BaseStreamTest implements Serializable {
private static final Logger LOG = Logger.getLogger(StreamableRDDTest_Failures.class);
private static StreamListenerServer server;
@BeforeClass
public static void setup() throws StandardException {
server = new StreamListenerServer(0);
server.start();
}
@BeforeClass
public static void startSpark() {
SpliceSpark.getContextUnsafe();
}
@AfterClass
public static void stopSpark() {
SpliceSpark.getContextUnsafe().stop();
}
@Before
public void reset() {
FailsFunction.reset();
FailsTwiceFunction.reset();
}
@Test
public void testBasicStream() throws Exception {
StreamListener<ExecRow> sl = new StreamListener<>();
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(tenRows, 2).mapToPair(new FailsFunction(3));
StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort());
srdd.submit();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
while (it.hasNext()) {
ExecRow execRow = it.next();
LOG.trace(execRow);
count++;
assertNotNull(execRow);
assertTrue(execRow.getColumn(1).getInt() < 10);
}
assertEquals(10, count);
}
@Test
public void testFailureBoundary() throws Exception {
StreamListener<ExecRow> sl = new StreamListener<>();
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(tenRows, 20).mapToPair(new FailsFunction(4));
StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort());
srdd.submit();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
while (it.hasNext()) {
ExecRow execRow = it.next();
LOG.trace(execRow);
count++;
assertNotNull(execRow);
assertTrue(execRow.getColumn(1).getInt() < 10);
}
assertEquals(10, count);
}
@Test
public void testBlockingMedium() throws StandardException, FileNotFoundException, UnsupportedEncodingException {
int size = 20000;
int batches = 2;
int batchSize = 512;
StreamListener<ExecRow> sl = new StreamListener<>(-1, 0, batches, batchSize);
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
List<Tuple2<ExecRow,ExecRow>> manyRows = new ArrayList<>();
for(int i = 0; i < size; ++i) {
manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
}
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 2).sortByKey().mapToPair(new FailsFunction(5000));
final StreamableRDD srdd = new StreamableRDD(rdd.values(), null, sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort(), batches, batchSize);
new Thread() {
@Override
public void run() {
try {
srdd.submit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}.start();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
while (it.hasNext()) {
ExecRow execRow = it.next();
assertNotNull(execRow);
count++;
}
assertEquals(size, count);
}
@Test
public void testBlockingLarge() throws StandardException, FileNotFoundException, UnsupportedEncodingException {
int size = 100000;
int batches = 2;
int batchSize = 512;
StreamListener<ExecRow> sl = new StreamListener<>(-1, 0, batches, batchSize);
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
List<Tuple2<ExecRow,ExecRow>> manyRows = new ArrayList<>();
for(int i = 0; i < size; ++i) {
manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
}
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 12).sortByKey().mapToPair(new FailsFunction(10000));
final StreamableRDD srdd = new StreamableRDD(rdd.values(), null, sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort(), batches, batchSize);
new Thread() {
@Override
public void run() {
try {
srdd.submit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}.start();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
while (it.hasNext()) {
ExecRow execRow = it.next();
assertNotNull(execRow);
count++;
}
assertEquals(size, count);
}
@Test
public void testFailureBeforeLargeOffset() throws StandardException {
StreamListener<ExecRow> sl = new StreamListener<>(400, 30000);
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
List<Tuple2<ExecRow,ExecRow>> manyRows = new ArrayList<>();
for(int i = 0; i < 100000; ++i) {
manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
}
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 13).mapToPair(new FailsFunction(29500));;
final StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort());
new Thread() {
@Override
public void run() {
try {
srdd.submit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}.start();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
int first = 30000;
while (it.hasNext()) {
ExecRow execRow = it.next();
assertNotNull(execRow);
assertEquals(count+first, execRow.getColumn(1).getInt());
count++;
}
assertEquals(400, count);
}
@Test<|fim▁hole|> HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
List<Tuple2<ExecRow,ExecRow>> manyRows = new ArrayList<>();
for(int i = 0; i < 100000; ++i) {
manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
}
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 13).mapToPair(new FailsFunction(200));;
final StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort());
new Thread() {
@Override
public void run() {
try {
srdd.submit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}.start();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
int first = 300;
while (it.hasNext()) {
ExecRow execRow = it.next();
assertNotNull(execRow);
assertEquals(count+first, execRow.getColumn(1).getInt());
count++;
}
assertEquals(40000, count);
}
@Test
public void testFailureAfterOffset() throws StandardException {
StreamListener<ExecRow> sl = new StreamListener<>(40000, 300);
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
List<Tuple2<ExecRow,ExecRow>> manyRows = new ArrayList<>();
for(int i = 0; i < 100000; ++i) {
manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
}
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 13).mapToPair(new FailsFunction(14000));;
final StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort());
new Thread() {
@Override
public void run() {
try {
srdd.submit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}.start();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
int first = 300;
while (it.hasNext()) {
ExecRow execRow = it.next();
assertNotNull(execRow);
assertEquals(count+first, execRow.getColumn(1).getInt());
count++;
}
assertEquals(40000, count);
}
@Test
public void testFailureAfterLimit() throws StandardException {
StreamListener<ExecRow> sl = new StreamListener<>(40000, 300);
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
List<Tuple2<ExecRow,ExecRow>> manyRows = new ArrayList<>();
for(int i = 0; i < 100000; ++i) {
manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
}
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 13).mapToPair(new FailsFunction(40301));;
final StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort());
new Thread() {
@Override
public void run() {
try {
srdd.submit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}.start();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
int first = 300;
while (it.hasNext()) {
ExecRow execRow = it.next();
assertNotNull(execRow);
assertEquals(count+first, execRow.getColumn(1).getInt());
count++;
}
assertEquals(40000, count);
}
@Test
public void testFailureDuringRecoveryWarmup() throws StandardException, FileNotFoundException, UnsupportedEncodingException {
int size = 100000;
int batches = 2;
int batchSize = 512;
StreamListener<ExecRow> sl = new StreamListener<>(-1, 0, batches, batchSize);
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
List<Tuple2<ExecRow,ExecRow>> manyRows = new ArrayList<>();
for(int i = 0; i < size; ++i) {
manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
}
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 2).sortByKey().mapToPair(new FailsTwiceFunction(10000, 100));
final StreamableRDD srdd = new StreamableRDD(rdd.values(), null, sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort(), batches, batchSize);
new Thread() {
@Override
public void run() {
try {
srdd.submit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}.start();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
while (it.hasNext()) {
ExecRow execRow = it.next();
assertNotNull(execRow);
count++;
}
assertEquals(size, count);
}
@Test
public void testFailureAfterRecoveryWarmup() throws StandardException, FileNotFoundException, UnsupportedEncodingException {
int size = 100000;
int batches = 2;
int batchSize = 512;
StreamListener<ExecRow> sl = new StreamListener<>(-1, 0, batches, batchSize);
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
List<Tuple2<ExecRow,ExecRow>> manyRows = new ArrayList<>();
for(int i = 0; i < size; ++i) {
manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
}
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 2).sortByKey().mapToPair(new FailsTwiceFunction(10000, 2000));
final StreamableRDD srdd = new StreamableRDD(rdd.values(), null, sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort(), batches, batchSize);
new Thread() {
@Override
public void run() {
try {
srdd.submit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}.start();
Iterator<ExecRow> it = sl.getIterator();
int count = 0;
while (it.hasNext()) {
ExecRow execRow = it.next();
assertNotNull(execRow);
count++;
}
assertEquals(size, count);
}
@Test
public void testPersistentFailureWithOffset() throws StandardException, FileNotFoundException, UnsupportedEncodingException {
int size = 100000;
int batches = 2;
int batchSize = 512;
StreamListener<ExecRow> sl = new StreamListener<>(-1, 10, batches, batchSize);
HostAndPort hostAndPort = server.getHostAndPort();
server.register(sl);
List<Tuple2<ExecRow,ExecRow>> manyRows = new ArrayList<>();
for(int i = 0; i < size; ++i) {
manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
}
JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 2).sortByKey().mapToPair(new FailsForeverFunction(0));
final StreamableRDD srdd = new StreamableRDD(rdd.distinct().values(), null, sl.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort(), batches, batchSize);
new Thread() {
@Override
public void run() {
try {
srdd.submit();
} catch (Exception e) {
sl.failed(e);
throw new RuntimeException(e);
}
}
}.start();
// This call should not raise an exception even though the Spark job fails
Iterator<ExecRow> it = sl.getIterator();
try {
it.hasNext();
fail("Should have raised exception");
} catch (Exception e) {
//expected exception
}
}
}
class FailsFunction implements PairFunction<Tuple2<ExecRow, ExecRow>, ExecRow, ExecRow> {
static AtomicBoolean failed = new AtomicBoolean(false);
final long toFail;
public FailsFunction(long toFail) {
this.toFail = toFail;
}
@Override
public Tuple2<ExecRow, ExecRow> call(Tuple2<ExecRow, ExecRow> element) throws Exception {
if (element._1().getColumn(1).getInt() == toFail && !failed.get()) {
failed.set(true);
throw new RuntimeException("Failure");
}
return element;
}
public static void reset() {
failed.set(false);
}
}
class FailsTwiceFunction implements PairFunction<Tuple2<ExecRow, ExecRow>, ExecRow, ExecRow> {
static AtomicBoolean failed = new AtomicBoolean(false);
static AtomicBoolean failed2 = new AtomicBoolean(false);
final long toFail;
final long toFail2;
public FailsTwiceFunction(long toFail, long toFail2) {
this.toFail = toFail;
this.toFail2 = toFail2;
}
@Override
public Tuple2<ExecRow, ExecRow> call(Tuple2<ExecRow, ExecRow> element) throws Exception {
if (!failed.get()) {
if (element._1().getColumn(1).getInt() == toFail) {
failed.set(true);
throw new RuntimeException("First Failure");
}
} else if (!failed2.get()) {
if (element._1().getColumn(1).getInt() == toFail2) {
failed2.set(true);
throw new RuntimeException("Second Failure");
}
}
return element;
}
public static void reset() {
failed.set(false);
failed2.set(false);
}
}
class FailsForeverFunction implements PairFunction<Tuple2<ExecRow, ExecRow>, ExecRow, ExecRow> {
final long toFail;
public FailsForeverFunction(long toFail) {
this.toFail = toFail;
}
@Override
public Tuple2<ExecRow, ExecRow> call(Tuple2<ExecRow, ExecRow> element) throws Exception {
if (element._1().getColumn(1).getInt() == toFail) {
throw new RuntimeException("Failure");
}
return element;
}
}<|fim▁end|> | public void testFailureBeforeOffset() throws StandardException {
StreamListener<ExecRow> sl = new StreamListener<>(40000, 300); |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from setuptools import setup, find_packages
from trackma import utils
try:
LONG_DESCRIPTION = open("README.rst").read()
except IOError:
LONG_DESCRIPTION = __doc__
NAME = "Trackma"
REQUIREMENTS = []
EXTRA_REQUIREMENTS = {
'curses': ['urwid'],
'GTK': ['pygobject'],
'Qt': [],
}
setup(
name=NAME,
version=utils.VERSION,
packages=find_packages(),
install_requires=REQUIREMENTS,
extras_require=EXTRA_REQUIREMENTS,
package_data={'trackma': ['data/*']},
<|fim▁hole|> author='z411',
author_email='[email protected]',
description='Open multi-site list manager',
long_description=LONG_DESCRIPTION,
url='https://github.com/z411/trackma',
keywords='list manager, curses, gtk, qt, myanimelist, hummingbird, vndb',
license="GPL-3",
entry_points={
'console_scripts': [
'trackma = trackma.ui.cli:main',
'trackma-curses = trackma.ui.curses:main [curses]',
],
'gui_scripts': [
'trackma-gtk = trackma.ui.gtkui:main [GTK]',
'trackma-qt = trackma.ui.qtui:main [Qt]',
'trackma-qt4 = trackma.ui.qt4ui:main [Qt]',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: End Users/Desktop',
'Topic :: Internet',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: POSIX',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
]
)<|fim▁end|> | |
<|file_name|>app.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from models.zips import Zips
from geopy import distance
from geopy import Point
connect('scratch', host='mongodb://142.133.150.180/scratch')
# zipins = Zips(zipcode=999999, city="testlocation", loc=[1.0,1.0],pop=12345, state="ZZ").save()
locationList = []
location = {}
distanceList = []
for zip in Zips.objects:
locationList.append(zip)
for location1 in locationList:
if location1.city=="BEVERLY HILLS" :
point1 = Point(location1.loc[0], location1.loc[1])
for location2 in locationList:
if location1 != location2 and location2.city !="BEVERLY HILLS":
point2 = Point(location2.loc[0], location2.loc[1])
if(distance.distance(point1, point2) < 5):
distanceList.append(location2)
for location in distanceList:
print (location.city, location.zipcode)<|fim▁end|> | from mongoengine import * |
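geopy's Distance carries explicit units, so the bare 5 in the comparison above can be made unambiguous; the coordinates here are placeholders.

from geopy import Point, distance

p1 = Point(34.07, -118.40)
p2 = Point(34.05, -118.24)
d = distance.distance(p1, p2)
print(d.km < 5, d.miles < 5)   # compare in a named unit instead of a bare number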
<|file_name|>lexical.go<|end_file_name|><|fim▁begin|>// Package lexical computes the structure of the lexical environment,
// including the definition of and references to all universal,
// package-level, file-level and function-local entities. It does not
// record qualified identifiers, labels, struct fields, or methods.
//
// It is intended for renaming and refactoring tools, which need a more
// precise understanding of identifier resolution than is available from
// the output of the type-checker alone.
//
// THIS INTERFACE IS EXPERIMENTAL AND MAY CHANGE OR BE REMOVED IN FUTURE.
//<|fim▁hole|>// OVERVIEW
//
// As we traverse the AST, we build a "spaghetti stack" of Blocks,
// i.e. a tree with parent edges pointing to the root. Each time we
// visit an identifier that's a reference into the lexical environment,
// we create and save an Environment, which captures the current mapping
// state of the Block; these are saved for the client.
//
// We don't bother recording non-lexical references.
// TODO(adonovan):
// - make it robust against syntax errors. Audit all type assertions, etc.
// - better still, after the Go 1.4 thaw, move this into go/types.
// I don't think it need be a big change since the visitor is already there;
// we just need to record Environments. lexical.Block is analogous
// to types.Scope.
import (
"fmt"
"go/ast"
"go/token"
"os"
"strconv"
"golang.org/x/tools/go/types"
)
const trace = false
var logf = func(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format, args...)
}
// A Block is a level of the lexical environment, a tree of blocks.
// It maps names to objects.
//
type Block struct {
kind string // one of universe package file func block if switch typeswitch case for range
syntax ast.Node // syntax declaring the block (nil for universe and package) [needed?]
parent Environment
bindings []types.Object // bindings in lexical order
index map[string]int // maps a name to the index of its binding, for fast lookup
}
// An Environment is a snapshot of a Block taken at a certain lexical
// position. It may contain bindings for fewer names than the
// (completed) block, or different bindings for names that are
// re-defined later in the block.
//
// For example, the lexical Block for the function f below contains a
// binding for the local var x, but the Environments captured at the
// two print(x) calls differ: the second contains this binding, the
// first does not. The first Environment contains a different binding
// for x: the string var defined in the package block, an ancestor.
//
// var x string
// func f() {
// print(x)
// x := 1
// print(x)
// }
//
type Environment struct {
block *Block
nbindings int // length of prefix of block.bindings that's visible
}
// Depth returns the depth of this block in the block tree.
// The universal block has depth 1, a package block 2, a file block 3, etc.
func (b *Block) Depth() int {
if b == nil {
return 0
}
return 1 + b.parent.block.Depth()
}
// env returns an Environment that is a snapshot of b's current state.
func (b *Block) env() Environment {
return Environment{b, len(b.bindings)}
}
// Lookup returns the definition of name in the environment specified by
// env, and the Block that defines it, which may be an ancestor.
func (env Environment) Lookup(name string) (types.Object, *Block) {
if env.block == nil {
return nil, nil
}
return lookup(env.block, name, env.nbindings)
}
// nbindings specifies what prefix of b.bindings should be considered visible.
func lookup(b *Block, name string, nbindings int) (types.Object, *Block) {
if b == nil {
return nil, nil
}
if i, ok := b.index[name]; ok && i < nbindings {
return b.bindings[i], b
}
parent := b.parent
if parent.block == nil {
return nil, nil
}
return lookup(parent.block, name, parent.nbindings)
}
// Lookup returns the definition of name in the environment specified by
// b, and the Block that defines it, which may be an ancestor.
func (b *Block) Lookup(name string) (types.Object, *Block) {
return b.env().Lookup(name)
}
// Block returns the block of which this environment is a partial view.
func (env Environment) Block() *Block {
return env.block
}
func (env Environment) String() string {
return fmt.Sprintf("%s:%d", env.block, env.nbindings)
}
func (b *Block) String() string {
var s string
if b.parent.block != nil {
s = b.parent.block.String()
s += "."
}
return s + b.kind
}
var universe = &Block{kind: "universe", index: make(map[string]int)}
func init() {
for i, name := range types.Universe.Names() {
obj := types.Universe.Lookup(name)
universe.bindings = append(universe.bindings, obj)
universe.index[name] = i
}
}
// -- resolver ---------------------------------------------------------
// A Reference provides the lexical environment for a given reference to
// an object in lexical scope.
type Reference struct {
Id *ast.Ident
Env Environment
}
// resolver holds the state of the identifier resolution visitation:
// the package information, the result, and the current block.
type resolver struct {
fset *token.FileSet
imports map[string]*types.Package
pkg *types.Package
info *types.Info
// visitor state
block *Block
result *Info
}
func (r *resolver) setBlock(kind string, syntax ast.Node) *Block {
b := &Block{
kind: kind,
syntax: syntax,
parent: r.block.env(),
index: make(map[string]int),
}
if syntax != nil {
r.result.Blocks[syntax] = b
}
r.block = b
return b
}
func (r *resolver) use(id *ast.Ident, env Environment) {
if id.Name == "_" {
return // an error
}
obj, _ := env.Lookup(id.Name)
if obj == nil {
logf("%s: lookup of %s failed\n", r.fset.Position(id.Pos()), id.Name)
} else if want := r.info.Uses[id]; obj != want {
// sanity check against go/types resolver
logf("%s: internal error: lookup of %s yielded wrong object: got %v (%s), want %v\n",
r.fset.Position(id.Pos()), id.Name, types.ObjectString(r.pkg, obj),
r.fset.Position(obj.Pos()),
want)
}
if trace {
logf("use %s = %v in %s\n", id.Name, types.ObjectString(r.pkg, obj), env)
}
r.result.Refs[obj] = append(r.result.Refs[obj], Reference{id, env})
}
func (r *resolver) define(b *Block, id *ast.Ident) {
obj := r.info.Defs[id]
if obj == nil {
logf("%s: internal error: not a defining ident: %s\n",
r.fset.Position(id.Pos()), id.Name)
panic(id)
}
r.defineObject(b, id.Name, obj)
// Objects (other than PkgName) defined at file scope
// are also defined in the enclosing package scope.
if _, ok := b.syntax.(*ast.File); ok {
switch obj.(type) {
default:
r.defineObject(b.parent.block, id.Name, obj)
case nil, *types.PkgName:
}
}
}
// Used for implicit objects created by some ImportSpecs and CaseClauses.
func (r *resolver) defineImplicit(b *Block, n ast.Node, name string) {
obj := r.info.Implicits[n]
if obj == nil {
logf("%s: internal error: not an implicit definition: %T\n",
r.fset.Position(n.Pos()), n)
}
r.defineObject(b, name, obj)
}
func (r *resolver) defineObject(b *Block, name string, obj types.Object) {
if obj.Name() == "_" {
return
}
i := len(b.bindings)
b.bindings = append(b.bindings, obj)
b.index[name] = i
if trace {
logf("def %s = %s in %s\n", name, types.ObjectString(r.pkg, obj), b)
}
r.result.Defs[obj] = b
}
func (r *resolver) function(recv *ast.FieldList, typ *ast.FuncType, body *ast.BlockStmt, syntax ast.Node) {
// Use all signature types in enclosing block.
r.expr(typ)
r.fieldList(recv, false)
savedBlock := r.block // save
r.setBlock("func", syntax)
// Define all parameters/results, and visit the body, within the func block.
r.fieldList(typ.Params, true)
r.fieldList(typ.Results, true)
r.fieldList(recv, true)
if body != nil {
r.stmtList(body.List)
}
r.block = savedBlock // restore
}
func (r *resolver) fieldList(list *ast.FieldList, def bool) {
if list != nil {
for _, f := range list.List {
if def {
for _, id := range f.Names {
r.define(r.block, id)
}
} else {
r.expr(f.Type)
}
}
}
}
func (r *resolver) exprList(list []ast.Expr) {
for _, x := range list {
r.expr(x)
}
}
func (r *resolver) expr(n ast.Expr) {
switch n := n.(type) {
case *ast.BadExpr:
case *ast.BasicLit:
// no-op
case *ast.Ident:
r.use(n, r.block.env())
case *ast.Ellipsis:
if n.Elt != nil {
r.expr(n.Elt)
}
case *ast.FuncLit:
r.function(nil, n.Type, n.Body, n)
case *ast.CompositeLit:
if n.Type != nil {
r.expr(n.Type)
}
tv := r.info.Types[n]
if _, ok := deref(tv.Type).Underlying().(*types.Struct); ok {
for _, elt := range n.Elts {
if kv, ok := elt.(*ast.KeyValueExpr); ok {
r.expr(kv.Value)
// Also uses field kv.Key (non-lexical)
// id := kv.Key.(*ast.Ident)
// obj := r.info.Uses[id]
// logf("use %s = %v (field)\n",
// id.Name, types.ObjectString(r.pkg, obj))
// TODO make a fake FieldVal selection?
} else {
r.expr(elt)
}
}
} else {
r.exprList(n.Elts)
}
case *ast.ParenExpr:
r.expr(n.X)
case *ast.SelectorExpr:
r.expr(n.X)
// Non-lexical reference to field/method, or qualified identifier.
// if sel, ok := r.info.Selections[n]; ok { // selection
// switch sel.Kind() {
// case types.FieldVal:
// logf("use %s = %v (field)\n",
// n.Sel.Name, types.ObjectString(r.pkg, sel.Obj()))
// case types.MethodExpr, types.MethodVal:
// logf("use %s = %v (method)\n",
// n.Sel.Name, types.ObjectString(r.pkg, sel.Obj()))
// }
// } else { // qualified identifier
// obj := r.info.Uses[n.Sel]
// logf("use %s = %v (qualified)\n", n.Sel.Name, obj)
// }
case *ast.IndexExpr:
r.expr(n.X)
r.expr(n.Index)
case *ast.SliceExpr:
r.expr(n.X)
if n.Low != nil {
r.expr(n.Low)
}
if n.High != nil {
r.expr(n.High)
}
if n.Max != nil {
r.expr(n.Max)
}
case *ast.TypeAssertExpr:
r.expr(n.X)
if n.Type != nil {
r.expr(n.Type)
}
case *ast.CallExpr:
r.expr(n.Fun)
r.exprList(n.Args)
case *ast.StarExpr:
r.expr(n.X)
case *ast.UnaryExpr:
r.expr(n.X)
case *ast.BinaryExpr:
r.expr(n.X)
r.expr(n.Y)
case *ast.KeyValueExpr:
r.expr(n.Key)
r.expr(n.Value)
case *ast.ArrayType:
if n.Len != nil {
r.expr(n.Len)
}
r.expr(n.Elt)
case *ast.StructType:
// Use all the type names, but don't define any fields.
r.fieldList(n.Fields, false)
case *ast.FuncType:
// Use all the type names, but don't define any vars.
r.fieldList(n.Params, false)
r.fieldList(n.Results, false)
case *ast.InterfaceType:
// Use all the type names, but don't define any methods.
r.fieldList(n.Methods, false)
case *ast.MapType:
r.expr(n.Key)
r.expr(n.Value)
case *ast.ChanType:
r.expr(n.Value)
default:
panic(n)
}
}
func (r *resolver) stmtList(list []ast.Stmt) {
for _, s := range list {
r.stmt(s)
}
}
func (r *resolver) stmt(n ast.Stmt) {
switch n := n.(type) {
case *ast.BadStmt:
case *ast.EmptyStmt:
// nothing to do
case *ast.DeclStmt:
decl := n.Decl.(*ast.GenDecl)
for _, spec := range decl.Specs {
switch spec := spec.(type) {
case *ast.ValueSpec: // const or var
if spec.Type != nil {
r.expr(spec.Type)
}
r.exprList(spec.Values)
for _, name := range spec.Names {
r.define(r.block, name)
}
case *ast.TypeSpec:
r.define(r.block, spec.Name)
r.expr(spec.Type)
}
}
case *ast.LabeledStmt:
// Also defines label n.Label (non-lexical)
r.stmt(n.Stmt)
case *ast.ExprStmt:
r.expr(n.X)
case *ast.SendStmt:
r.expr(n.Chan)
r.expr(n.Value)
case *ast.IncDecStmt:
r.expr(n.X)
case *ast.AssignStmt:
if n.Tok == token.DEFINE {
r.exprList(n.Rhs)
for _, lhs := range n.Lhs {
id := lhs.(*ast.Ident)
if _, ok := r.info.Defs[id]; ok {
r.define(r.block, id)
} else {
r.use(id, r.block.env())
}
}
} else { // ASSIGN
r.exprList(n.Lhs)
r.exprList(n.Rhs)
}
case *ast.GoStmt:
r.expr(n.Call)
case *ast.DeferStmt:
r.expr(n.Call)
case *ast.ReturnStmt:
r.exprList(n.Results)
case *ast.BranchStmt:
if n.Label != nil {
// Also uses label n.Label (non-lexical)
}
case *ast.SelectStmt:
r.stmtList(n.Body.List)
case *ast.BlockStmt: // (explicit blocks only)
savedBlock := r.block // save
r.setBlock("block", n)
r.stmtList(n.List)
r.block = savedBlock // restore
case *ast.IfStmt:
savedBlock := r.block // save
r.setBlock("if", n)
if n.Init != nil {
r.stmt(n.Init)
}
r.expr(n.Cond)
r.stmt(n.Body) // new block
if n.Else != nil {
r.stmt(n.Else)
}
r.block = savedBlock // restore
case *ast.CaseClause:
savedBlock := r.block // save
r.setBlock("case", n)
if obj, ok := r.info.Implicits[n]; ok {
// e.g.
// switch y := x.(type) {
// case T: // we declare an implicit 'var y T' in this block
// }
r.defineImplicit(r.block, n, obj.Name())
}
r.exprList(n.List)
r.stmtList(n.Body)
r.block = savedBlock // restore
case *ast.SwitchStmt:
savedBlock := r.block // save
r.setBlock("switch", n)
if n.Init != nil {
r.stmt(n.Init)
}
if n.Tag != nil {
r.expr(n.Tag)
}
r.stmtList(n.Body.List)
r.block = savedBlock // restore
case *ast.TypeSwitchStmt:
savedBlock := r.block // save
r.setBlock("typeswitch", n)
if n.Init != nil {
r.stmt(n.Init)
}
if assign, ok := n.Assign.(*ast.AssignStmt); ok { // y := x.(type)
r.expr(assign.Rhs[0]) // skip y: not a defining ident
} else {
r.stmt(n.Assign)
}
r.stmtList(n.Body.List)
r.block = savedBlock // restore
case *ast.CommClause:
savedBlock := r.block // save
r.setBlock("case", n)
if n.Comm != nil {
r.stmt(n.Comm)
}
r.stmtList(n.Body)
r.block = savedBlock // restore
case *ast.ForStmt:
savedBlock := r.block // save
r.setBlock("for", n)
if n.Init != nil {
r.stmt(n.Init)
}
if n.Cond != nil {
r.expr(n.Cond)
}
if n.Post != nil {
r.stmt(n.Post)
}
r.stmt(n.Body)
r.block = savedBlock // restore
case *ast.RangeStmt:
r.expr(n.X)
savedBlock := r.block // save
r.setBlock("range", n)
if n.Tok == token.DEFINE {
if n.Key != nil {
r.define(r.block, n.Key.(*ast.Ident))
}
if n.Value != nil {
r.define(r.block, n.Value.(*ast.Ident))
}
} else {
if n.Key != nil {
r.expr(n.Key)
}
if n.Value != nil {
r.expr(n.Value)
}
}
r.stmt(n.Body)
r.block = savedBlock // restore
default:
panic(n)
}
}
func (r *resolver) doImport(s *ast.ImportSpec, fileBlock *Block) {
path, _ := strconv.Unquote(s.Path.Value)
pkg := r.imports[path]
if s.Name == nil { // normal
r.defineImplicit(fileBlock, s, pkg.Name())
} else if s.Name.Name == "." { // dot import
for _, name := range pkg.Scope().Names() {
if ast.IsExported(name) {
obj := pkg.Scope().Lookup(name)
r.defineObject(fileBlock, name, obj)
}
}
} else { // renaming import
r.define(fileBlock, s.Name)
}
}
func (r *resolver) doPackage(pkg *types.Package, files []*ast.File) {
r.block = universe
r.result.Blocks[nil] = universe
r.result.PackageBlock = r.setBlock("package", nil)
var fileBlocks []*Block
// 1. Insert all package-level objects into file and package blocks.
// (PkgName objects are only inserted into file blocks.)
for _, f := range files {
r.block = r.result.PackageBlock
fileBlock := r.setBlock("file", f) // package is not yet visible to file
fileBlocks = append(fileBlocks, fileBlock)
for _, d := range f.Decls {
switch d := d.(type) {
case *ast.GenDecl:
for _, s := range d.Specs {
switch s := s.(type) {
case *ast.ImportSpec:
r.doImport(s, fileBlock)
case *ast.ValueSpec: // const or var
for _, name := range s.Names {
r.define(r.result.PackageBlock, name)
}
case *ast.TypeSpec:
r.define(r.result.PackageBlock, s.Name)
}
}
case *ast.FuncDecl:
if d.Recv == nil { // function
if d.Name.Name != "init" {
r.define(r.result.PackageBlock, d.Name)
}
}
}
}
}
// 2. Now resolve bodies of GenDecls and FuncDecls.
for i, f := range files {
fileBlock := fileBlocks[i]
fileBlock.parent = r.result.PackageBlock.env() // make entire package visible to this file
for _, d := range f.Decls {
r.block = fileBlock
switch d := d.(type) {
case *ast.GenDecl:
for _, s := range d.Specs {
switch s := s.(type) {
case *ast.ValueSpec: // const or var
if s.Type != nil {
r.expr(s.Type)
}
r.exprList(s.Values)
case *ast.TypeSpec:
r.expr(s.Type)
}
}
case *ast.FuncDecl:
r.function(d.Recv, d.Type, d.Body, d)
}
}
}
r.block = nil
}
// An Info contains the lexical reference structure of a package.
type Info struct {
Defs map[types.Object]*Block // maps each object to its defining lexical block
Refs map[types.Object][]Reference // maps each object to the set of references to it
Blocks map[ast.Node]*Block // maps declaring syntax to block; nil => universe
PackageBlock *Block // the package-level lexical block
}
// Structure computes the structure of the lexical environment of the
// package specified by (pkg, info, files).
//
// The info.{Types,Defs,Uses,Implicits} maps must have been populated
// by the type-checker.
//
// fset is used for logging.
//
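// A minimal usage sketch (the loading/type-checking step is assumed and not
// shown here; any mechanism that yields a type-checked *types.Package plus a
// populated *types.Info will do):
//
//	fset := token.NewFileSet()
//	// ... parse the package files into []*ast.File, type-check with go/types ...
//	lexinfo := Structure(fset, pkg, typesInfo, files)
//	pkgBlock := lexinfo.PackageBlock
//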
func Structure(fset *token.FileSet, pkg *types.Package, info *types.Info, files []*ast.File) *Info {
r := resolver{
fset: fset,
imports: make(map[string]*types.Package),
result: &Info{
Defs: make(map[types.Object]*Block),
Refs: make(map[types.Object][]Reference),
Blocks: make(map[ast.Node]*Block),
},
pkg: pkg,
info: info,
}
// Build import map for just this package.
r.imports["unsafe"] = types.Unsafe
for _, imp := range pkg.Imports() {
r.imports[imp.Path()] = imp
}
r.doPackage(pkg, files)
return r.result
}
// -- Plundered from golang.org/x/tools/go/ssa -----------------
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
if p, ok := typ.Underlying().(*types.Pointer); ok {
return p.Elem()
}
return typ
}<|fim▁end|> | package lexical
|
<|file_name|>day1.rs<|end_file_name|><|fim▁begin|>use std::io;
use common;
use std::collections::HashSet;
use std::error::Error;
fn dist(pos: (i32, i32)) -> i32 {
let (x, y) = pos;
return x.abs() + y.abs()
}
pub struct Day1 {
pos: (i32, i32),
dir: i32,
}
impl Day1 {
pub fn new() -> Day1 {
Day1{
pos: (0, 0),
            dir: 0,
}
}
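    // Direction encoding shared by walk() and turn(): bit 0 selects the axis
    // (0 = x, 1 = y) and bit 1 selects the sign, so dir 0..3 means +x, +y, -x, -y;
    // turning right adds 1 (mod 4) and turning left adds 3 (i.e. subtracts 1).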
fn walk(&mut self) -> (i32, i32) {
let backwards = if self.dir & 2 == 2 { -1 } else { 1 };
let (x, y) = self.pos;
self.pos = (x + backwards * (1 - (self.dir & 1)), y + backwards * (self.dir & 1));
self.pos
}
fn turn(&mut self, action: &str) {
if action == "R" {
self.dir += 1;
} else {
self.dir += 3;
}
self.dir %= 4;
}
}
impl common::Solution for Day1 {
fn part1(&mut self, input: &mut io::Read) -> String {
let mut content = String::new();
match input.read_to_string(&mut content) {
Err(why) => panic!("couldn't open input: {}", why.description()),
Ok(_) => {},
};
for instruction in content.split(", ") {
let turn = &instruction[..1];
let steps: i32 = instruction[1..].trim().parse().expect("Invalid number of steps");
self.turn(turn);
for _ in 0..steps {
self.walk();
}
}
format!("{}", dist(self.pos))
}
fn part2(&mut self, input: &mut io::Read) -> String {
let mut content = String::new();
match input.read_to_string(&mut content) {<|fim▁hole|> Err(why) => panic!("couldn't open input: {}", why.description()),
Ok(_) => {},
};
let mut positions = HashSet::new();
positions.insert(self.pos);
for instruction in content.split(", ") {
let turn = &instruction[..1];
let steps: i32 = instruction[1..].trim().parse().expect("Invalid number of steps");
self.turn(turn);
for _ in 0..steps {
let pos = self.walk();
if positions.contains(&pos) {
return format!("{}", dist(pos))
} else {
positions.insert(pos);
}
}
}
panic!("Never visited anything twice!")
}
}
#[cfg(test)]
mod tests {
use super::Day1;
use common::Solution;
#[test]
fn sample_part1() {
let mut instance = Day1::new();
assert_eq!("8", instance.part1(&mut "R8, R4, R4, R8".as_bytes()))
}
#[test]
fn sample_part2() {
let mut instance = Day1::new();
assert_eq!("4", instance.part2(&mut "R8, R4, R4, R8".as_bytes()))
}
}<|fim▁end|> | |
<|file_name|>test_docstring.py<|end_file_name|><|fim▁begin|>from twisted.trial import unittest
from twistedchecker.checkers.docstring import DocstringChecker
class DocstringTestCase(unittest.TestCase):
"""
Test for twistedchecker.checkers.docstring
"""
<|fim▁hole|> """
Test of twistedchecker.checkers.docstring._getLineIndent.
"""
checker = DocstringChecker()
indentNoSpace = checker._getLineIndent("foo")
indentTwoSpaces = checker._getLineIndent(" foo")
indentFourSpaces = checker._getLineIndent(" foo")
self.assertEqual(indentNoSpace, 0)
self.assertEqual(indentTwoSpaces, 2)
self.assertEqual(indentFourSpaces, 4)<|fim▁end|> | def test_getLineIndent(self): |
<|file_name|>node_types.py<|end_file_name|><|fim▁begin|>class NodeType(object):
Base = 'base'
Model = 'model'
Analysis = 'analysis'
Test = 'test'
Archive = 'archive'
Macro = 'macro'
Operation = 'operation'
Seed = 'seed'
Documentation = 'documentation'
@classmethod
def executable(cls):
return [
cls.Model,
cls.Test,
cls.Archive,
cls.Analysis,
cls.Operation,
cls.Seed,
cls.Documentation,
]
@classmethod
def refable(cls):
return [
cls.Model,<|fim▁hole|> cls.Seed,
]
class RunHookType:
Start = 'on-run-start'
End = 'on-run-end'
Both = [Start, End]<|fim▁end|> | |
<|file_name|>FrequencyDomain.py<|end_file_name|><|fim▁begin|># coding=utf-8
from __future__ import division
from ..BaseIndicator import Indicator as _Indicator
from ..tools.Tools import PSD as PSD
import numpy as _np
__author__ = 'AleB'
class InBand(_Indicator):
"""
Extract the PSD of a given frequency band
Parameters
----------
freq_min : float, >0
Left bound of the frequency band
freq_max : float, >0
Right bound of the frequency band
method : 'ar', 'welch' or 'fft'
Method to estimate the PSD
Additional parameters
---------------------
For the PSD (see pyphysio.tools.Tools.PSD), for instance:
interp_freq : float, >0
Frequency used to (re-)interpolate the signal
Returns
-------
freq : numpy array
Frequencies in the frequency band
psd : float
Power Spectrum Density in the frequency band
"""
def __init__(self, freq_min, freq_max, method, **kwargs):
_Indicator.__init__(self, freq_min=freq_min, freq_max=freq_max, method=method, **kwargs)
@classmethod
def algorithm(cls, data, params):
freq, spec = PSD(**params)(data)
# freq is sorted so
i_min = _np.searchsorted(freq, params["freq_min"])
i_max = _np.searchsorted(freq, params["freq_max"])
return freq[i_min:i_max], spec[i_min:i_max]
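# Usage sketch (parameter values are illustrative only, not taken from this module):
#   freq, psd = InBand(freq_min=0.04, freq_max=0.15, method='welch', interp_freq=4)(ibi_signal)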
class PowerInBand(_Indicator):<|fim▁hole|> """
Estimate the power in given frequency band
Parameters
----------
freq_min : float, >0
Left bound of the frequency band
freq_max : float, >0
Right bound of the frequency band
method : 'ar', 'welch' or 'fft'
Method to estimate the PSD
Additional parameters
---------------------
For the PSD (see pyphysio.tools.Tools.PSD):
interp_freq : float, >0
Frequency used to (re-)interpolate the signal
Returns
-------
power : float
Power in the frequency band
"""
def __init__(self, freq_min, freq_max, method, **kwargs):
_Indicator.__init__(self, freq_min=freq_min, freq_max=freq_max, method=method, **kwargs)
@classmethod
def algorithm(cls, data, params):
freq, powers = InBand(**params)(data)
return _np.sum(powers)
class PeakInBand(_Indicator):
"""
Estimate the peak frequency in a given frequency band
Parameters
----------
freq_min : float, >0
Left bound of the frequency band
freq_max : float, >0
Right bound of the frequency band
method : 'ar', 'welch' or 'fft'
Method to estimate the PSD
Additional parameters
---------------------
For the PSD (see pyphysio.tools.Tools.PSD):
interp_freq : float, >0
Frequency used to (re-)interpolate the signal
Returns
-------
peak : float
Peak frequency
"""
def __init__(self, freq_min, freq_max, method, **kwargs):
_Indicator.__init__(self, freq_min=freq_min, freq_max=freq_max, method=method, **kwargs)
@classmethod
def algorithm(cls, data, params):
freq, power = InBand(**params)(data)
return freq[_np.argmax(power)]<|fim▁end|> | |
<|file_name|>zigbee.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding:UTF-8 -*-
import urllib
import urllib2
import json
import serial
import time
import gpio
import re
import binascii
import threading
import datetime
import sys
# use your deviceID and apikey
deviceID="xxxxxxxxxx"
apikey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
key_pin = "gpio12"
s = ""
door = ""
PIR = ""
Leak = ""
Smoke = ""
Remote = ""
Door_mac = ""
PIR_mac = ""
Leak_mac = ""
Smoke_mac = ""
Remote_mac = ""
# use USB UART or UART on pcDuino to communicate with zigbee gateway
try:
ser = serial.Serial("/dev/ttyUSB0", 115200,timeout = 0.1)
except serial.serialutil.SerialException:
try:
ser = serial.Serial("/dev/ttyS1", 115200,timeout = 0.1)
with open("/sys/devices/virtual/misc/gpio/mode/gpio0",'w') as UART_RX:
UART_RX.write('3')
with open("/sys/devices/virtual/misc/gpio/mode/gpio1",'w') as UART_TX:
UART_TX.write('3')
except serial.serialutil.SerialException:
print "serial failed!"
exit()
def setup():
gpio.pinMode(key_pin,gpio.INPUT)
def key_interrupt():
val=gpio.digitalRead(key_pin)
if val==0:
        time.sleep(0.010)
        # re-read the pin after the debounce delay so a transient glitch is ignored
        val=gpio.digitalRead(key_pin)
        if val==0:
return '1'
return '0'
def http_post(data):
try:
url = 'http://www.linksprite.io/api/http'
jdata = json.dumps(data)
req = urllib2.Request(url, jdata)
req.add_header('Content-Type','application/json')
response = urllib2.urlopen(req)
return response.read()
except urllib2.URLError:
print "connect failed"
return "connect failed"
pass
def hexShow(argv):
result = ''
hLen = len(argv)
for i in xrange(hLen):
hvol = ord(argv[i])
hhex = '%02x'%hvol
result += hhex+' '
return result
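# e.g. hexShow('\x0e\xfc\x02') returns '0e fc 02 ' (note the trailing space);
# the frame matching below relies on this spaced format, e.g. find("0e fc 02 e1").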
def register():
while True:
ser.write('\x02')
ser.write('\x75')
ser.write('\x1e')
data = ser.readline()
val=hexShow(data)
leng = len(val)
if leng > 45:
a = val.find("0e fc 02 e1",1)
if a != -1:
print "add equipment ok"
b=a+12
mac = val[b:b+29]
return mac
break
time.sleep(0.2)
def set_target(short_mac):
send = "0c fc 02 01 04 01 01 01 02"+short_mac+"02 0a"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
a = rec.find("04 fd 02 01",0)
if a != -1:
print "set target ok"
break
time.sleep(0.2)
def gateway_mac():
while True:
ser.write('\x02')
ser.write('\x14')
ser.write('\x6f')
data = ser.readline()
dat = hexShow(data)
leng = len(dat)
if leng > 30:
a = dat.find("0c 15 00 6f",0)
if a != -1:
dt = dat[15:38]
return dt
break
time.sleep(1)
def bind(eq_mac,gat_mac):
send = "16 d8"+eq_mac+"01 01 00 03"+gat_mac+"01"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
start = datetime.datetime.now()
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
b = rec.find("02 d9 00")
if b != -1:
print "bind ok"
break
time.sleep(0.2)
def cluster():
send = "08 FC 00 00 05 00 01 01 00"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
start = datetime.datetime.now()
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
leng = len(rec)
finsh = datetime.datetime.now()
tim = (finsh-start).seconds
if tim > 5:
print "failure! please add again"
return "xxxx"
break
if leng > 30:
b = rec.find("0b fe 03")
c = rec.find("00 01 07 fe 03 00")
if b != -1:
return rec[b+30:b+35]
break
elif c != -1:
return "11 00"
time.sleep(0.2)
def report():
send = "11 FC 00 01 00 06 01 00 21 00 20 f0 00 f0 00 01 00 00"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
leng = len(rec)
if leng > 15:
b = rec.find("06 fd 00")
if b != -1:
print "send report ok"
break
time.sleep(0.2)
def alarm():
line = ser.readline()
val = hexShow(line)
leng = len(val)
if leng >= 56:
#print val
po = val.find("fe 01")
if po != -1:
aa = val[po+21:po+26]
sta = val[po+46]
s = aa+sta
return s
return -1
def open_socket():
    # build and send the "socket on" command frame
    send = "05 FC 01 06 00 01"
    s = send.replace(' ','')
    a=binascii.a2b_hex(s)
    ser.write(a)
def close_socket():
    # build and send the "socket off" command frame
    send = "05 FC 01 06 00 00"
    s = send.replace(' ','')
    a=binascii.a2b_hex(s)
    ser.write(a)
def recovery():
global s
global PIR
s = '0'
PIR = '0'
values ={
"action":"update",
"apikey":apikey,
"deviceid":deviceID,
"params":
{
"PIR":PIR,
"SOS":s
}}
http_post(values)
def update(mac,sta):
global Door_mac
global PIR_mac
global Leak_mac
global Smoke_mac
global Remote_mac
global s
global door
global PIR
global Leak
global Smoke
global Remote
try:
f = open('door.txt','r')
Door_mac=f.read()
f.close()
except IOError:
pass
try:
f = open('pir.txt','r')
PIR_mac=f.read()
f.close()
except IOError:
pass
try:
f = open('leak.txt','r')
Leak_mac=f.read()
f.close()
except IOError:
pass
try:
f = open('smoke.txt','r')
Smoke_mac=f.read()
f.close()
except IOError:
pass
try:
f = open('remote.txt','r')
Remote_mac=f.read()
f.close()
except IOError:
pass
if mac == Door_mac:
door = sta
elif mac == PIR_mac:
PIR = sta
elif mac == Leak_mac:
Leak = sta
elif mac == Smoke_mac:
Smoke = sta
elif mac == Remote_mac:
Remote = sta
if sta == '1':
s = sta
else:
print "You should add the equipment first"
values ={
"action":"update",
"apikey":apikey,
"deviceid":deviceID,
"params":
{
"Door":door,
"PIR":PIR,
"Leak":Leak,
"Smoke":Smoke,
"Remote":Remote,
"SOS":s
}}
http_post(values)
if s == '1'or PIR == '1':
timer = threading.Timer(2,recovery)
timer.start()
def main():
global Door_mac
global PIR_mac
global Leak_mac
global Smoke_mac
global Remote_mac
setup()
if ser.isOpen() == True:
print "serial open succeed!"
else:
print "serial open failure!"
while True:
        # Check the GPIO12 status; if it is high, execute commands to
# add new zigbee device into zigbee gateway
a = key_interrupt()
if a == '1':
print "Add equipment!"
# Set gateway to allow adding device
val=register()
short = val[0:5]
print "short:"+short
mac = val[6:29]
print "mac:"+mac
# Get the gateway MAC address
gatmac=gateway_mac()
print "gatewaymac:"+gatmac
# Configure the communication with zigbee device<|fim▁hole|> # Bind the zigbee device
bind(mac,gatmac)
# Read the zone type to check the type of zigbee device
# which can identify the alarm information from different zigbee sensor.
zone_type=cluster()
print "zone_type:"+zone_type
if zone_type == "15 00":
Door_mac = short
f = open('door.txt','w')
f.write(short)
f.close()
report()
elif zone_type == "0d 00":
PIR_mac = short
f=open('pir.txt','w')
f.write(short)
f.close()
report()
elif zone_type == "2a 00":
Leak_mac = short
f=open('leak.txt','w')
f.write(short)
f.close()
report()
elif zone_type == "28 00":
Smoke_mac = short
f=open('smoke.txt','w')
f.write(short)
f.close()
report()
elif zone_type == "11 00":
Remote_mac = short
f=open('remote.txt','w')
f.write(short)
f.close()
report()
# Check the alarm information from zigbee sensor node
data=alarm()
if data != -1:
short_mac = data[0:5]
print"short mac:"+short_mac
status = data[5]
print"status:"+status
# upload the alarm information to linksprite.io server
update(short_mac,status)
time.sleep(0.2)
if __name__=='__main__':
try:
main()
except KeyboardInterrupt:
ser.close()<|fim▁end|> | set_target(short)
|
<|file_name|>triangle.rs<|end_file_name|><|fim▁begin|>#[macro_use]
extern crate glium;
mod support;
use glium::Surface;
use glium::glutin;
use glium::index::PrimitiveType;
fn main() {
use glium::DisplayBuild;
// building the display, ie. the main object
let display = glutin::WindowBuilder::new()
.build_glium()
.unwrap();
// building the vertex buffer, which contains all the vertices that we will draw
let vertex_buffer = {
#[derive(Copy, Clone)]
struct Vertex {
position: [f32; 2],
color: [f32; 3],
}
implement_vertex!(Vertex, position, color);
glium::VertexBuffer::new(&display,
vec![
Vertex { position: [-0.5, -0.5], color: [0.0, 1.0, 0.0] },
Vertex { position: [ 0.0, 0.5], color: [0.0, 0.0, 1.0] },
Vertex { position: [ 0.5, -0.5], color: [1.0, 0.0, 0.0] },
]
)
};
// building the index buffer
let index_buffer = glium::IndexBuffer::new(&display, PrimitiveType::TrianglesList,
vec![0u16, 1, 2]);
// compiling shaders and linking them together
let program = program!(&display,
140 => {
vertex: "
#version 140
uniform mat4 matrix;
in vec2 position;
in vec3 color;
out vec3 vColor;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * matrix;
vColor = color;
}
",
fragment: "
#version 140
in vec3 vColor;
out vec4 f_color;
void main() {
f_color = vec4(vColor, 1.0);
}
"
},
110 => {
vertex: "
#version 110
uniform mat4 matrix;
attribute vec2 position;
attribute vec3 color;
varying vec3 vColor;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * matrix;
vColor = color;
}
",
fragment: "
#version 110
varying vec3 vColor;
void main() {
gl_FragColor = vec4(vColor, 1.0);
}
",
},
100 => {
vertex: "
#version 100
uniform lowp mat4 matrix;
attribute lowp vec2 position;
attribute lowp vec3 color;
varying lowp vec3 vColor;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * matrix;
vColor = color;
}
",
fragment: "
#version 100
varying lowp vec3 vColor;
void main() {
gl_FragColor = vec4(vColor, 1.0);
}
",
},
).unwrap();
// the main loop
support::start_loop(|| {
// building the uniforms
let uniforms = uniform! {
matrix: [
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0f32]<|fim▁hole|> ]
};
// drawing a frame
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 0.0);
target.draw(&vertex_buffer, &index_buffer, &program, &uniforms, &Default::default()).unwrap();
target.finish().unwrap();
// polling and handling the events received by the window
for event in display.poll_events() {
match event {
glutin::Event::Closed => return support::Action::Stop,
_ => ()
}
}
support::Action::Continue
});
}<|fim▁end|> | |
<|file_name|>spanner_pb2_grpc.py<|end_file_name|><|fim▁begin|># Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!<|fim▁hole|>
from google.cloud.spanner_v1.proto import (
result_set_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2,
)
from google.cloud.spanner_v1.proto import (
spanner_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2,
)
from google.cloud.spanner_v1.proto import (
transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class SpannerStub(object):
"""Cloud Spanner API
The Cloud Spanner API can be used to manage sessions and execute
transactions on data stored in Cloud Spanner databases.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateSession = channel.unary_unary(
"/google.spanner.v1.Spanner/CreateSession",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString,
)
self.BatchCreateSessions = channel.unary_unary(
"/google.spanner.v1.Spanner/BatchCreateSessions",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.FromString,
)
self.GetSession = channel.unary_unary(
"/google.spanner.v1.Spanner/GetSession",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString,
)
self.ListSessions = channel.unary_unary(
"/google.spanner.v1.Spanner/ListSessions",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.FromString,
)
self.DeleteSession = channel.unary_unary(
"/google.spanner.v1.Spanner/DeleteSession",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ExecuteSql = channel.unary_unary(
"/google.spanner.v1.Spanner/ExecuteSql",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString,
)
self.ExecuteStreamingSql = channel.unary_stream(
"/google.spanner.v1.Spanner/ExecuteStreamingSql",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString,
)
self.ExecuteBatchDml = channel.unary_unary(
"/google.spanner.v1.Spanner/ExecuteBatchDml",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.FromString,
)
self.Read = channel.unary_unary(
"/google.spanner.v1.Spanner/Read",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString,
)
self.StreamingRead = channel.unary_stream(
"/google.spanner.v1.Spanner/StreamingRead",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString,
)
self.BeginTransaction = channel.unary_unary(
"/google.spanner.v1.Spanner/BeginTransaction",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.FromString,
)
self.Commit = channel.unary_unary(
"/google.spanner.v1.Spanner/Commit",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.FromString,
)
self.Rollback = channel.unary_unary(
"/google.spanner.v1.Spanner/Rollback",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.PartitionQuery = channel.unary_unary(
"/google.spanner.v1.Spanner/PartitionQuery",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString,
)
self.PartitionRead = channel.unary_unary(
"/google.spanner.v1.Spanner/PartitionRead",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString,
)
class SpannerServicer(object):
"""Cloud Spanner API
The Cloud Spanner API can be used to manage sessions and execute
transactions on data stored in Cloud Spanner databases.
"""
def CreateSession(self, request, context):
"""Creates a new session. A session can be used to perform
transactions that read and/or modify data in a Cloud Spanner database.
Sessions are meant to be reused for many consecutive
transactions.
Sessions can only execute one transaction at a time. To execute
multiple concurrent read-write/write-only transactions, create
multiple sessions. Note that standalone reads and queries use a
transaction internally, and count toward the one transaction
limit.
Active sessions use additional server resources, so it is a good idea to
delete idle and unneeded sessions.
Aside from explicit deletes, Cloud Spanner can delete sessions for which no
operations are sent for more than an hour. If a session is deleted,
requests to it return `NOT_FOUND`.
Idle sessions can be kept alive by sending a trivial SQL query
periodically, e.g., `"SELECT 1"`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def BatchCreateSessions(self, request, context):
"""Creates multiple new sessions.
This API can be used to initialize a session cache on the clients.
See https://goo.gl/TgSFN2 for best practices on session cache management.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetSession(self, request, context):
"""Gets a session. Returns `NOT_FOUND` if the session does not exist.
This is mainly useful for determining whether a session is still
alive.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListSessions(self, request, context):
"""Lists all sessions in a given database.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteSession(self, request, context):
"""Ends a session, releasing server resources associated with it. This will
asynchronously trigger cancellation of any operations that are running with
this session.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExecuteSql(self, request, context):
"""Executes an SQL statement, returning all results in a single reply. This
method cannot be used to return a result set larger than 10 MiB;
if the query yields more data than that, the query fails with
a `FAILED_PRECONDITION` error.
Operations inside read-write transactions might return `ABORTED`. If
this occurs, the application should restart the transaction from
the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
Larger result sets can be fetched in streaming fashion by calling
[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExecuteStreamingSql(self, request, context):
"""Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result
set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there
is no limit on the size of the returned result set. However, no
individual row in the result set can exceed 100 MiB, and no
column value can exceed 10 MiB.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExecuteBatchDml(self, request, context):
"""Executes a batch of SQL DML statements. This method allows many statements
to be run with lower latency than submitting them sequentially with
[ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
Statements are executed in sequential order. A request can succeed even if
a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the
response provides information about the statement that failed. Clients must
inspect this field to determine whether an error occurred.
Execution stops after the first failed statement; the remaining statements
are not executed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Read(self, request, context):
"""Reads rows from the database using key lookups and scans, as a
simple key/value style alternative to
[ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to
return a result set larger than 10 MiB; if the read matches more
data than that, the read fails with a `FAILED_PRECONDITION`
error.
Reads inside read-write transactions might return `ABORTED`. If
this occurs, the application should restart the transaction from
the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
Larger result sets can be yielded in streaming fashion by calling
[StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def StreamingRead(self, request, context):
"""Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a
stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the
size of the returned result set. However, no individual row in
the result set can exceed 100 MiB, and no column value can exceed
10 MiB.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def BeginTransaction(self, request, context):
"""Begins a new transaction. This step can often be skipped:
[Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
[Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
side-effect.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Commit(self, request, context):
"""Commits a transaction. The request includes the mutations to be
applied to rows in the database.
`Commit` might return an `ABORTED` error. This can occur at any time;
commonly, the cause is conflicts with concurrent
transactions. However, it can also happen for a variety of other
reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
the transaction from the beginning, re-using the same session.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Rollback(self, request, context):
"""Rolls back a transaction, releasing any locks it holds. It is a good
idea to call this for any transaction that includes one or more
[Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and
ultimately decides not to commit.
`Rollback` returns `OK` if it successfully aborts the transaction, the
transaction was already aborted, or the transaction is not
found. `Rollback` never returns `ABORTED`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PartitionQuery(self, request, context):
"""Creates a set of partition tokens that can be used to execute a query
operation in parallel. Each of the returned partition tokens can be used
by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset
of the query result to read. The same session and read-only transaction
must be used by the PartitionQueryRequest used to create the
partition tokens and the ExecuteSqlRequests that use the partition tokens.
Partition tokens become invalid when the session used to create them
is deleted, is idle for too long, begins a new transaction, or becomes too
old. When any of these happen, it is not possible to resume the query, and
the whole operation must be restarted from the beginning.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PartitionRead(self, request, context):
"""Creates a set of partition tokens that can be used to execute a read
operation in parallel. Each of the returned partition tokens can be used
by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read
result to read. The same session and read-only transaction must be used by
the PartitionReadRequest used to create the partition tokens and the
ReadRequests that use the partition tokens. There are no ordering
guarantees on rows returned among the returned partition tokens, or even
within each individual StreamingRead call issued with a partition_token.
Partition tokens become invalid when the session used to create them
is deleted, is idle for too long, begins a new transaction, or becomes too
old. When any of these happen, it is not possible to resume the read, and
the whole operation must be restarted from the beginning.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_SpannerServicer_to_server(servicer, server):
rpc_method_handlers = {
"CreateSession": grpc.unary_unary_rpc_method_handler(
servicer.CreateSession,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.SerializeToString,
),
"BatchCreateSessions": grpc.unary_unary_rpc_method_handler(
servicer.BatchCreateSessions,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.SerializeToString,
),
"GetSession": grpc.unary_unary_rpc_method_handler(
servicer.GetSession,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.SerializeToString,
),
"ListSessions": grpc.unary_unary_rpc_method_handler(
servicer.ListSessions,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.SerializeToString,
),
"DeleteSession": grpc.unary_unary_rpc_method_handler(
servicer.DeleteSession,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ExecuteSql": grpc.unary_unary_rpc_method_handler(
servicer.ExecuteSql,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.SerializeToString,
),
"ExecuteStreamingSql": grpc.unary_stream_rpc_method_handler(
servicer.ExecuteStreamingSql,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.SerializeToString,
),
"ExecuteBatchDml": grpc.unary_unary_rpc_method_handler(
servicer.ExecuteBatchDml,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.SerializeToString,
),
"Read": grpc.unary_unary_rpc_method_handler(
servicer.Read,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.SerializeToString,
),
"StreamingRead": grpc.unary_stream_rpc_method_handler(
servicer.StreamingRead,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.SerializeToString,
),
"BeginTransaction": grpc.unary_unary_rpc_method_handler(
servicer.BeginTransaction,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.SerializeToString,
),
"Commit": grpc.unary_unary_rpc_method_handler(
servicer.Commit,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.SerializeToString,
),
"Rollback": grpc.unary_unary_rpc_method_handler(
servicer.Rollback,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"PartitionQuery": grpc.unary_unary_rpc_method_handler(
servicer.PartitionQuery,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.SerializeToString,
),
"PartitionRead": grpc.unary_unary_rpc_method_handler(
servicer.PartitionRead,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.spanner.v1.Spanner", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))<|fim▁end|> | import grpc |
<|file_name|>MissingModuleMetadataException.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2019 MovingBlocks<|fim▁hole|> * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.terasology.gestalt.module.exceptions;
/**
* Exception for when metadata cannot be resolved for a module
*/
public class MissingModuleMetadataException extends RuntimeException {
public MissingModuleMetadataException() {
}
public MissingModuleMetadataException(String s) {
super(s);
}
public MissingModuleMetadataException(String s, Throwable throwable) {
super(s, throwable);
}
}<|fim▁end|> | * |
<|file_name|>perfect_number.py<|end_file_name|><|fim▁begin|>def is_perfect_number(n):<|fim▁hole|> sum = 0
for x in range(1, n):
if n % x == 0:
sum += x
return sum == n
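# A perfect number equals the sum of its proper divisors,
# e.g. 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14, so both return True here.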
num = int(input("Please enter a number to check if it is perfect or not: "))
print(is_perfect_number(num))<|fim▁end|> | |
<|file_name|>bench.py<|end_file_name|><|fim▁begin|>from myhdl import *
from UK101AddressDecode import UK101AddressDecode
def bench():
AL = Signal(intbv(0)[16:])
MonitorRom = Signal(bool(0))
ACIA = Signal(bool(0))
KeyBoardPort = Signal(bool(0))
VideoMem = Signal(bool(0))
BasicRom = Signal(bool(0))
Ram = Signal(bool(0))
dut = UK101AddressDecode(
AL,
MonitorRom,
ACIA,
KeyBoardPort,
VideoMem,
BasicRom, <|fim▁hole|> for i in range(0, 2**16):
AL.next = i
yield delay(10)
raise StopSimulation()
return dut, stimulus
sim = Simulation(traceSignals(bench))
sim.run()<|fim▁end|> | Ram)
@instance
def stimulus(): |
<|file_name|>list_users_builder.rs<|end_file_name|><|fim▁begin|>use crate::prelude::*;
use crate::responses::ListUsersResponse;
use crate::ResourceType;
use azure_sdk_core::errors::{check_status_extract_headers_and_body, AzureError};
use azure_sdk_core::prelude::*;
use futures::stream::{unfold, Stream};
use hyper::StatusCode;
use std::convert::TryInto;
#[derive(Debug)]
pub struct ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
database_client: &'a dyn DatabaseClient<C>,
user_agent: Option<&'b str>,
activity_id: Option<&'b str>,
consistency_level: Option<ConsistencyLevel<'b>>,
continuation: Option<&'b str>,
max_item_count: i32,
}
impl<'a, 'b, C> Clone for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
fn clone(&self) -> Self {
Self {
database_client: self.database_client,
user_agent: self.user_agent,
activity_id: self.activity_id,
consistency_level: self.consistency_level.clone(),
continuation: self.continuation,
max_item_count: self.max_item_count,
}
}
}
impl<'a, 'b, C> ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
#[inline]
pub(crate) fn new(database_client: &'a dyn DatabaseClient<C>) -> ListUsersBuilder<'a, 'b, C> {
ListUsersBuilder {
database_client,
user_agent: None,
activity_id: None,
consistency_level: None,
continuation: None,
max_item_count: -1,
}
}
}
impl<'a, 'b, C> DatabaseClientRequired<'a, C> for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
#[inline]
fn database_client(&self) -> &'a dyn DatabaseClient<C> {
self.database_client
}
}
//get mandatory no traits methods
//set mandatory no traits methods
impl<'a, 'b, C> UserAgentOption<'b> for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
#[inline]
fn user_agent(&self) -> Option<&'b str> {
self.user_agent
}
}
impl<'a, 'b, C> ActivityIdOption<'b> for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
#[inline]
fn activity_id(&self) -> Option<&'b str> {
self.activity_id
}
}
impl<'a, 'b, C> ConsistencyLevelOption<'b> for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
#[inline]
fn consistency_level(&self) -> Option<ConsistencyLevel<'b>> {
self.consistency_level.clone()
}
}
impl<'a, 'b, C> ContinuationOption<'b> for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
#[inline]
fn continuation(&self) -> Option<&'b str> {
self.continuation
}
}
impl<'a, 'b, C> MaxItemCountOption for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
#[inline]
fn max_item_count(&self) -> i32 {
self.max_item_count
}
}
impl<'a, 'b, C> UserAgentSupport<'b> for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
type O = ListUsersBuilder<'a, 'b, C>;
#[inline]
fn with_user_agent(self, user_agent: &'b str) -> Self::O {
ListUsersBuilder {
database_client: self.database_client,
user_agent: Some(user_agent),
activity_id: self.activity_id,
consistency_level: self.consistency_level,
continuation: self.continuation,
max_item_count: self.max_item_count,
}
}
}
impl<'a, 'b, C> ActivityIdSupport<'b> for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
type O = ListUsersBuilder<'a, 'b, C>;
#[inline]
fn with_activity_id(self, activity_id: &'b str) -> Self::O {
ListUsersBuilder {
database_client: self.database_client,
user_agent: self.user_agent,
activity_id: Some(activity_id),
consistency_level: self.consistency_level,
continuation: self.continuation,
max_item_count: self.max_item_count,
}
}
}
impl<'a, 'b, C> ConsistencyLevelSupport<'b> for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
type O = ListUsersBuilder<'a, 'b, C>;
#[inline]
fn with_consistency_level(self, consistency_level: ConsistencyLevel<'b>) -> Self::O {
ListUsersBuilder {
database_client: self.database_client,
user_agent: self.user_agent,
activity_id: self.activity_id,
consistency_level: Some(consistency_level),
continuation: self.continuation,
max_item_count: self.max_item_count,
}
}
}
impl<'a, 'b, C> ContinuationSupport<'b> for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
type O = ListUsersBuilder<'a, 'b, C>;
#[inline]
fn with_continuation(self, continuation: &'b str) -> Self::O {
ListUsersBuilder {
database_client: self.database_client,
user_agent: self.user_agent,
activity_id: self.activity_id,
consistency_level: self.consistency_level,
continuation: Some(continuation),
max_item_count: self.max_item_count,
}
}
}
impl<'a, 'b, C> MaxItemCountSupport for ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
type O = ListUsersBuilder<'a, 'b, C>;
#[inline]
fn with_max_item_count(self, max_item_count: i32) -> Self::O {
ListUsersBuilder {
database_client: self.database_client,
user_agent: self.user_agent,
activity_id: self.activity_id,
consistency_level: self.consistency_level,
continuation: self.continuation,
max_item_count,
}
}
}
// methods callable regardless
impl<'a, 'b, C> ListUsersBuilder<'a, 'b, C> where C: CosmosClient {}
// methods callable only when every mandatory field has been filled
impl<'a, 'b, C> ListUsersBuilder<'a, 'b, C>
where
C: CosmosClient,
{
pub async fn execute(&self) -> Result<ListUsersResponse, AzureError> {
trace!("ListUsersBuilder::execute called");
let req = self.database_client.cosmos_client().prepare_request(
&format!("dbs/{}/users", self.database_client.database_name()),
hyper::Method::GET,
ResourceType::Users,
);
let req = req.body(hyper::Body::empty())?;
debug!("\nreq == {:?}", req);
let (headers, body) = check_status_extract_headers_and_body(
self.database_client.hyper_client().request(req),
StatusCode::OK,
)
.await?;
Ok((&headers, &body as &[u8]).try_into()?)
}
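    // Paging sketch: `stream` below drives `execute` repeatedly, cloning the
    // builder and re-issuing the request with each response's continuation
    // token (via `with_continuation`) until the service stops returning one.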
pub fn stream(&self) -> impl Stream<Item = Result<ListUsersResponse, AzureError>> + '_ {
#[derive(Debug, Clone, PartialEq)]
enum States {
Init,
Continuation(String),
};
unfold(
Some(States::Init),
move |continuation_token: Option<States>| {
async move {
debug!("continuation_token == {:?}", &continuation_token);
let response = match continuation_token {
Some(States::Init) => self.execute().await,
Some(States::Continuation(continuation_token)) => {
self.clone()
.with_continuation(&continuation_token)
.execute()
.await
}
None => return None,
};
// the ? operator does not work in async move (yet?)
// so we have to resort to this boilerplate
let response = match response {
Ok(response) => response,
Err(err) => return Some((Err(err), None)),
};
let continuation_token = match &response.continuation_token {
Some(ct) => Some(States::Continuation(ct.to_owned())),
None => None,<|fim▁hole|> },
)
}
}<|fim▁end|> | };
Some((Ok(response), continuation_token))
} |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';
const styles = require('./styles');
const permsToString = require('./perms-to-string');
const sizeToString = require('./size-to-string');
const sortFiles = require('./sort-files');
const fs = require('fs');
const path = require('path');
const he = require('he');
const etag = require('../etag');
const url = require('url');
const status = require('../status-handlers');
const supportedIcons = styles.icons;
const css = styles.css;
module.exports = (opts) => {
// opts are parsed by opts.js, defaults already applied
const cache = opts.cache;
const root = path.resolve(opts.root);
const baseDir = opts.baseDir;
const humanReadable = opts.humanReadable;
const hidePermissions = opts.hidePermissions;
const handleError = opts.handleError;
const showDotfiles = opts.showDotfiles;
const si = opts.si;
const weakEtags = opts.weakEtags;
return function middleware(req, res, next) {
// Figure out the path for the file from the given url
const parsed = url.parse(req.url);
const pathname = decodeURIComponent(parsed.pathname);
const dir = path.normalize(
path.join(
root,
path.relative(
path.join('/', baseDir),
pathname
)
)
);
fs.stat(dir, (statErr, stat) => {
if (statErr) {
if (handleError) {
status[500](res, next, { error: statErr });
} else {
next();
}
return;
}
// files are the listing of dir
fs.readdir(dir, (readErr, _files) => {
let files = _files;
if (readErr) {
if (handleError) {
status[500](res, next, { error: readErr });
} else {
next();
}
return;
}
// Optionally exclude dotfiles from directory listing.
if (!showDotfiles) {
files = files.filter(filename => filename.slice(0, 1) !== '.');
}
res.setHeader('content-type', 'text/html');
res.setHeader('etag', etag(stat, weakEtags));
res.setHeader('last-modified', (new Date(stat.mtime)).toUTCString());
res.setHeader('cache-control', cache);
function render(dirs, renderFiles, lolwuts) {
// each entry in the array is a [name, stat] tuple
let html = `${[
'<!doctype html>',
'<html>',
' <head>',
' <meta charset="utf-8">',
' <meta name="viewport" content="width=device-width">',
` <title>Index of ${he.encode(pathname)}</title>`,
` <style type="text/css">${css}</style>`,
' </head>',
' <body>',
`<h1>Index of ${he.encode(pathname)}</h1>`,
].join('\n')}\n`;
html += '<table>';
const failed = false;
const writeRow = (file) => {
// render a row given a [name, stat] tuple
const isDir = file[1].isDirectory && file[1].isDirectory();
let href = `${parsed.pathname.replace(/\/$/, '')}/${encodeURIComponent(file[0])}`;
// append trailing slash and query for dir entry
if (isDir) {
href += `/${he.encode((parsed.search) ? parsed.search : '')}`;
}
const displayName = he.encode(file[0]) + ((isDir) ? '/' : '');
const ext = file[0].split('.').pop();
const classForNonDir = supportedIcons[ext] ? ext : '_page';
const iconClass = `icon-${isDir ? '_blank' : classForNonDir}`;
            // TODO: use stylesheets?
html += `${'<tr>' +
'<td><i class="icon '}${iconClass}"></i></td>`;
if (!hidePermissions) {
html += `<td class="perms"><code>(${permsToString(file[1])})</code></td>`;
}
html +=
`<td class="file-size"><code>${sizeToString(file[1], humanReadable, si)}</code></td>` +
`<td class="display-name"><a href="${href}">${displayName}</a></td>` +
'</tr>\n';
};
dirs.sort((a, b) => a[0].toString().localeCompare(b[0].toString())).forEach(writeRow);
renderFiles.sort((a, b) => a.toString().localeCompare(b.toString())).forEach(writeRow);
lolwuts.sort((a, b) => a[0].toString().localeCompare(b[0].toString())).forEach(writeRow);
html += '</table>\n';
html += `<br><address>Node.js ${
process.version
}/ <a href="https://github.com/jfhbrook/node-ecstatic">ecstatic</a> ` +
`server running @ ${
he.encode(req.headers.host || '')}</address>\n` +
'</body></html>'
;
if (!failed) {
res.writeHead(200, { 'Content-Type': 'text/html' });
res.end(html);
}
}
sortFiles(dir, files, (lolwuts, dirs, sortedFiles) => {
// It's possible to get stat errors for all sorts of reasons here.
// Unfortunately, our two choices are to either bail completely,<|fim▁hole|> // Whatever.
// if it makes sense to, add a .. link
if (path.resolve(dir, '..').slice(0, root.length) === root) {
fs.stat(path.join(dir, '..'), (err, s) => {
if (err) {
if (handleError) {
status[500](res, next, { error: err });
} else {
next();
}
return;
}
dirs.unshift(['..', s]);
render(dirs, sortedFiles, lolwuts);
});
} else {
render(dirs, sortedFiles, lolwuts);
}
});
});
});
};
};<|fim▁end|> | // or just truck along as though everything's cool. In this case,
// I decided to just tack them on as "??!?" items along with dirs
// and files.
// |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
pass<|fim▁end|> | from django.contrib import admin
from models.snippets import Contact |
<|file_name|>OrgName.tsx<|end_file_name|><|fim▁begin|>import React from 'react';
import PropTypes from 'prop-types';
/**
* Org Name - to display organisation name of user contact
*/<|fim▁hole|> {item.first_name && item.organisation && <i className="icon-business" />}
{
item.first_name && item.organisation &&
<span>{item.organisation}</span>
}
</div>
);
OrgName.propTypes = {
item: PropTypes.object,
};<|fim▁end|> | export const OrgName:React.StatelessComponent<any> = ({item}) => (
<div key="org-name" className="container"> |
<|file_name|>docs.js<|end_file_name|><|fim▁begin|><|fim▁hole|>(function( $, window, document ) {
// Masthead
// -----------------------------------
var $window = $(window),
winTop = $window.scrollTop(),
$masthead = $('.masthead'),
$mastheadTitle = $masthead.find('.page-title'),
$pageTitle = $('.jumbotron h1'),
threshold = $pageTitle.offset().top - $masthead.outerHeight(),
fadeIn, fadeOut;
$window.scroll(function(){
winTop = $window.scrollTop();
fadeIn = -1 + ( winTop / threshold );
fadeOut = 2 - ( winTop / (threshold/2) );
// ^ OFFSET ^ FADESPEED
// Numbers further from Higher numbers increase
// zero will delay the the speed of the fade.
// opacity change.
$mastheadTitle.css( 'opacity', fadeIn );
$pageTitle.css( 'opacity', fadeOut );
});
}( window.jQuery, window, document ));<|fim▁end|> | // JS specifically for the toolkit documentation |
<|file_name|>scheme.go<|end_file_name|><|fim▁begin|>/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
admissionv1 "k8s.io/api/admission/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
func init() {
addToScheme(scheme)
}
func addToScheme(scheme *runtime.Scheme) {
utilruntime.Must(corev1.AddToScheme(scheme))
utilruntime.Must(appsv1.AddToScheme(scheme))
utilruntime.Must(batchv1.AddToScheme(scheme))
utilruntime.Must(admissionv1.AddToScheme(scheme))
}<|fim▁end|> | |
<|file_name|>instance-add.component.spec.ts<|end_file_name|><|fim▁begin|>/* tslint:disable:no-unused-variable */
import {async, ComponentFixture, TestBed} from '@angular/core/testing';<|fim▁hole|>import {InstanceAddComponent} from './instance-add.component';
describe('InstanceAddComponent', () => {
let component: InstanceAddComponent;
let fixture: ComponentFixture<InstanceAddComponent>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ InstanceAddComponent ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(InstanceAddComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
});<|fim▁end|> | import {By} from '@angular/platform-browser';
import {DebugElement} from '@angular/core';
|
<|file_name|>upgrade.py<|end_file_name|><|fim▁begin|>from ftw.upgrade import UpgradeStep
from plone import api
<|fim▁hole|>class AddLanguageIndex(UpgradeStep):
"""Add Language index."""
def __call__(self):
self.install_upgrade_profile()
ct = api.portal.get_tool("portal_catalog")
survey_results = ct(portal_type="euphorie.survey")
for brain in survey_results:
survey = brain.getObject()
survey.reindexObject(idxs=["Language"])<|fim▁end|> | |
<|file_name|>edukits_total_retail_report.py<|end_file_name|><|fim▁begin|>import time
from datetime import datetime
from pytz import timezone
from dateutil.relativedelta import relativedelta
import openerp
from openerp.report.interface import report_rml
from openerp.tools import to_xml
from openerp.report import report_sxw
from datetime import datetime
from openerp.tools.translate import _
from openerp.osv import osv, fields, orm
import math
import re
class edukits_total_retail(report_rml):
def create_xml(self,cr,uid,ids,datas,context={}):
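        # Recursively insert '.' as a thousands separator: each regex pass moves
        # one more group of three digits behind a dot until the string stops changing.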
def _thousand_separator(decimal,amount):
if not amount:
amount = 0.0
if type(amount) is float :
amount = str(decimal%amount)
else :
amount = str(amount)
if (amount == '0'):
return ' '
orig = amount
new = re.sub("^(-?\d+)(\d{3})", "\g<1>.\g<2>", amount)
if orig == new:
return new
else:
return _thousand_separator(decimal,new)
pool = openerp.registry(cr.dbname)
order_obj = pool.get('sale.order')
wh_obj = pool.get('stock.warehouse')
session_obj = pool.get('pos.session')
user_obj = pool.get('res.users')
users = user_obj.browse(cr,uid,uid)
warehouse_ids = datas['form']['warehouse_ids'] or wh_obj.search(cr, uid, [])
company = users.company_id
rml_parser = report_sxw.rml_parse(cr, uid, 'edukits_total_retail', context=context)
rml = """
<document filename="test.pdf">
<template pageSize="(21.0cm,29.7cm)" title="Total Retail Report" author="SGEEDE" allowSplitting="20">
<pageTemplate id="first">
<frame id="first" x1="50.0" y1="0.0" width="500" height="830"/>
</pageTemplate>
</template>
<stylesheet>
<blockTableStyle id="Table1">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
<blockTableStyle id="parent_table">
<blockAlignment value="LEFT"/>
<blockLeftPadding start="0,0" length="0.1cm"/>
<blockRightPadding start="0,0" length="0.1cm"/>
<blockTopPadding start="0,0" length="0.15cm"/>
<blockBottomPadding start="0,0" length="0.15cm"/>
</blockTableStyle>
<blockTableStyle id="Table2">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
<blockTableStyle id="Table3">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
</blockTableStyle>
<blockTableStyle id="Table3_Normal">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<blockTopPadding start="0,0" length="-0.15cm"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,1" stop="0,1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="0,0"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
<blockTableStyle id="Table3_PARENT">
<blockAlignment value="CENTER"/>
<blockValign value="TOP"/>
</blockTableStyle>
"""
for warehouse in wh_obj.browse(cr,uid,warehouse_ids):
if warehouse.color:
rml += """
<blockTableStyle id="Table3""" + to_xml(str(warehouse.color.name)) + """">
<blockBackground colorName="#"""+ to_xml(str(warehouse.color.color)) + """" start="0,0" stop="0,-1"/>
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<blockTopPadding start="0,0" length="0.1cm"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,1" stop="0,1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="0,0"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
"""
if not warehouse.color:
rml += """
<blockTableStyle id="Table3False">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<blockTopPadding start="0,0" length="0.1cm"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,1" stop="0,1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="0,0"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
"""
rml += """
<blockTableStyle id="Table3_LINE">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="2,0" stop="2,3"/>
</blockTableStyle>
<blockTableStyle id="Table3_LINE2">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
</blockTableStyle>
<blockTableStyle id="Table3_LINE2W">
<blockBackground colorName="white"/>
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
</blockTableStyle>
<blockTableStyle id="Table1_line">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,0" stop="2,0"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="2,0"/>
</blockTableStyle>
<blockTableStyle id="Table1_lines">
<blockBackground colorName="white"/>
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,0" stop="2,0"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="2,0"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="2,0"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="2,0"/>
</blockTableStyle>
<initialize>
<paraStyle name="all" alignment="justify"/>
</initialize>
<paraStyle name="P1" fontName="Helvetica" fontSize="9.0" leading="11" alignment="CENTER" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P2" fontName="Helvetica-Bold" fontSize="14.0" leading="17" alignment="RIGHT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P3" fontName="Times-Roman" fontSize="11.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P4" fontName="Times-Roman" fontSize="11.0" leading="10" alignment="RIGHT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P5" fontName="Times-Roman" fontSize="11.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P6" fontName="Helvetica" fontSize="9.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="P7" fontName="Helvetica" fontSize="9.0" leading="11" alignment="CENTER" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="P8" fontName="Helvetica" fontSize="8.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="P9" fontName="Times-Roman" fontSize="11.0" leading="14" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P10" fontName="Times-Roman" fontSize="11.0" leading="14" alignment="RIGHT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P11" fontName="Times-Roman" fontSize="11.0" leading="14" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P12" fontName="Helvetica" fontSize="8.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="P13" fontName="Helvetica" fontSize="8.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P14" fontName="Helvetica-Bold" fontSize="12.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="1.0"/>
<paraStyle name="P15" textColor="black" fontName="Helvetica" fontSize="10.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="1.0"/>
<paraStyle name="P15_W" textColor="white" fontName="Helvetica" fontSize="10.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="1.0"/>
<paraStyle name="P15_RIGHT" textColor="black" fontName="Helvetica" fontSize="10.0" leading="11" alignment="RIGHT" spaceBefore="0.0" spaceAfter="1.0"/>
<paraStyle name="P15_CENTER" textColor="black" fontName="Helvetica-Bold" fontSize="12.0" leading="11" alignment="CENTER" spaceBefore="0.0" spaceAfter="1.0"/>
<paraStyle name="P15_CENTER_2" textColor="black" fontName="Helvetica-Bold" fontSize="14.0" leading="11" alignment="CENTER" spaceBefore="0.0" spaceAfter="1.0"/>
<paraStyle name="P16" fontName="Helvetica" fontSize="9.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P17" fontName="Times-Roman" fontSize="8.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="1.0"/>
<paraStyle name="P19" rightIndent="0.0" leftIndent="0.0" fontName="Times-Roman" fontSize="10.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P20" rightIndent="0.0" leftIndent="0.0" fontName="Helvetica" fontSize="12.0" leading="11" alignment="CENTER" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="Standard" fontName="Times-Roman"/>
<paraStyle name="Text body" fontName="Times-Roman" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="List" fontName="Times-Roman" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="Table Contents" fontName="Times-Roman" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="Table Heading" fontName="Times-Roman" alignment="CENTER" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="Caption" fontName="Times-Roman" fontSize="10.0" leading="13" spaceBefore="6.0" spaceAfter="6.0"/>
<paraStyle name="Index" fontName="Times-Roman"/>
<paraStyle name="Heading" fontName="Helvetica" fontSize="15.0" leading="19" spaceBefore="12.0" spaceAfter="6.0"/>
<paraStyle name="Footer" fontName="Times-Roman"/>
<paraStyle name="Horizontal Line" fontName="Times-Roman" fontSize="6.0" leading="8" spaceBefore="0.0" spaceAfter="14.0"/>
<paraStyle name="terp_header" fontName="Helvetica-Bold" fontSize="15.0" leading="19" alignment="LEFT" spaceBefore="12.0" spaceAfter="6.0"/>
<paraStyle name="Heading 9" fontName="Helvetica-Bold" fontSize="75%" leading="NaN" spaceBefore="12.0" spaceAfter="6.0"/>
<paraStyle name="terp_tblheader_General" fontName="Helvetica-Bold" fontSize="8.0" leading="10" alignment="LEFT" spaceBefore="6.0" spaceAfter="6.0"/>
<paraStyle name="terp_tblheader_Details" fontName="Helvetica-Bold" fontSize="9.0" leading="11" alignment="LEFT" spaceBefore="6.0" spaceAfter="6.0"/>
<paraStyle name="terp_default_8" fontName="Helvetica" fontSize="9.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_default_Bold_8" fontName="Helvetica-Bold" fontSize="8.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_tblheader_General_Centre" fontName="Helvetica-Bold" fontSize="8.0" leading="10" alignment="CENTER" spaceBefore="6.0" spaceAfter="6.0"/>
<paraStyle name="terp_tblheader_General_Right" fontName="Helvetica-Bold" fontSize="8.0" leading="10" alignment="RIGHT" spaceBefore="6.0" spaceAfter="6.0"/>
<paraStyle name="terp_tblheader_Details_Centre" fontName="Helvetica-Bold" fontSize="9.0" leading="11" alignment="CENTER" spaceBefore="6.0" spaceAfter="6.0"/>
<paraStyle name="terp_tblheader_Details_Right" fontName="Helvetica-Bold" fontSize="9.0" leading="11" alignment="RIGHT" spaceBefore="6.0" spaceAfter="6.0"/>
<paraStyle name="terp_default_Right_8" fontName="Helvetica" fontSize="8.0" leading="10" alignment="RIGHT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_default_Centre_8" fontName="Helvetica" fontSize="8.0" leading="10" alignment="CENTER" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_header_Right" fontName="Helvetica-Bold" fontSize="15.0" leading="19" alignment="LEFT" spaceBefore="12.0" spaceAfter="6.0"/>
<paraStyle name="terp_header_Centre" fontName="Helvetica-Bold" fontSize="15.0" leading="19" alignment="CENTER" spaceBefore="12.0" spaceAfter="6.0"/>
<paraStyle name="terp_header_Centre2" fontName="Helvetica-Bold" fontSize="12.0" leading="19" alignment="CENTER" spaceBefore="12.0" spaceAfter="6.0"/>
<paraStyle name="terp_header_Centre3" fontName="Helvetica-Bold" fontSize="12.0" leading="19" alignment="LEFT" spaceBefore="12.0" spaceAfter="6.0"/>
<paraStyle name="terp_default_address" fontName="Helvetica" fontSize="10.0" leading="13" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_default_9" fontName="Helvetica" fontSize="9.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_default_12" fontName="Helvetica" fontSize="12.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_default_Bold_9" fontName="Helvetica-Bold" fontSize="9.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_default_Bold_9_Right" fontName="Helvetica-Bold" fontSize="9.0" leading="11" alignment="RIGHT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_default_Centre_9" fontName="Helvetica" fontSize="9.0" leading="11" alignment="CENTER" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="terp_default_Right_9" fontName="Helvetica" fontSize="9.0" leading="11" alignment="RIGHT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="Heading 1" fontName="Times-Bold" fontSize="24.0" leading="29" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="Heading 2" fontName="Times-Bold" fontSize="20.0" leading="29" spaceBefore="0.0" spaceAfter="0.0"/>
<images/>
</stylesheet>
<story>
"""
no_total = 1
rml += """
<blockTable colWidths="250,250" style="Table3_PARENT">
"""
# Day transaction for batamcentre
center = False
currency_amount = 0
currency_symbol =''
bank_ids = []
date_end = datetime.strptime(datas['form']['date_end'],"%Y-%m-%d")
# Normal transaction
for warehouse in wh_obj.browse(cr,uid,warehouse_ids):
currency_amount = warehouse.currency_id.rate_silent
location_id = warehouse.lot_stock_id.id
results = []
total_bank = 0.0
if warehouse.is_split:
date_start_day = datetime.strptime(datas['form']['date_end']+ ' 00:00:00',"%Y-%m-%d %H:%M:%S")
date_stop_day = datetime.strptime(datas['form']['date_end']+ ' 17:59:59',"%Y-%m-%d %H:%M:%S")
date_start = datetime.strptime(datas['form']['date_end']+ ' 18:00:00',"%Y-%m-%d %H:%M:%S")
date_stop = datetime.strptime(datas['form']['date_end']+ ' 23:59:59',"%Y-%m-%d %H:%M:%S")
sessions_ids = session_obj.search(cr,uid,[('stock_location_rel','=',location_id),('stop_at','!=',False)])
session_night_ids = []
session_day_ids = []
for sessions in session_obj.browse(cr,uid,sessions_ids):
stop_temp=datetime.strptime(sessions.stop_at,"%Y-%m-%d %H:%M:%S")
tz_count = 0
hour_offset = ""
minute_offset = ""
for tz_offset in users.tz_offset:
tz_count +=1
if tz_count <= 3:
hour_offset += tz_offset
elif tz_count <= 5:
minute_offset +=tz_offset
stop_at= stop_temp + relativedelta(hours=int(hour_offset))
if (stop_at >= date_start) and (stop_at <= date_stop):
session_night_ids.append(sessions.id)
if (stop_at >= date_start_day) and (stop_at <= date_stop_day):
session_day_ids.append(sessions.id)
# if not warehouse.is_split:
session_ids = session_obj.search(cr,uid,[('stop_at','>=',datas['form']['date_end']+ ' 00:00:00'),('stop_at','<=',datas['form']['date_end']+ ' 23:59:59'),('stock_location_rel','=',location_id)])
if len(warehouse_ids) == 1:
rml += """
<tr>
<td>
"""
elif no_total % 2 == 0:
rml += """<td>"""
else:
rml += """
<tr>
<td>
"""
if warehouse.color:
rml += """
<blockTable colWidths="210" style="Table3">
"""
if not warehouse.color:
rml += """
<blockTable colWidths="210" style="Table3_Normal">
"""
rml += """
<tr>
</tr>
<tr>
<td>
<blockTable rowHeights="38" colWidths="198" style="Table3""" + to_xml(str(warehouse.color.name)) + """">
<tr>
<td>
<para style="P15_CENTER_2">"""+ to_xml(str(warehouse.name)) + """</para>
</td>
</tr>
</blockTable>
<blockTable colWidths="198" style="Table1_lines">
<tr>
<td>
<para style="P15">TGL: """+ to_xml(str(format(date_end,'%d-%B-%y')))+"""</para>
</td>
</tr>
</blockTable>
<blockTable rowHeights="17" colWidths="198" style="Table3""" + to_xml(str(warehouse.color.name)) + """">
<tr>
<td background="pink">
<para style="P15_CENTER">SETORAN</para>
</td>
</tr>
</blockTable>
<blockTable colWidths="198" style="Table1_lines">
<tr>
<td>
"""
total_card = 0.0
# if not session_ids:
# rml +="""
# <para style="P15">-</para>
# """
total_amount = 0.0
total_amount_night = 0.0
#for day transaction if report is split
if warehouse.is_split:
for session in session_obj.browse(cr,uid,session_day_ids):
for bank in session.statement_ids:
if bank.journal_id.type == 'bank':
total_card +=bank.balance_end
if session.cashier_deposit_ids:
for cashier in session.cashier_deposit_ids:
total_amount += cashier.amount_total
else:
for session in session_obj.browse(cr,uid,session_ids):
for bank in session.statement_ids:
if bank.journal_id.type == 'bank':
total_card +=bank.balance_end
if session.cashier_deposit_ids:
for cashier in session.cashier_deposit_ids:
total_amount += cashier.amount_total
rml += """
<para style="P15">""" + rml_parser.formatLang(total_amount+0, currency_obj=company.currency_id) + """</para>
"""
# if warehouse.is_split:
if session_ids:
sessions = session_obj.browse(cr,uid,session_ids[0])
if warehouse.is_split:
rml += """
</td>
</tr>
</blockTable>
<blockTable rowHeights="17" colWidths="198" style="Table3""" + to_xml(str(warehouse.color.name)) + """">
<tr>
<td background="pink">
<para style="P15_CENTER">SETORAN (Malam)</para>
</td>
</tr>
</blockTable>
<blockTable colWidths="198" style="Table1_lines">
<tr>
<td>
"""
for session in session_obj.browse(cr,uid,session_night_ids):
for bank in session.statement_ids:
if bank.journal_id.type == 'bank':
total_card +=bank.balance_end
if session.cashier_deposit_ids:
for cashier in session.cashier_deposit_ids:
total_amount_night += cashier.amount_total
rml += """
<para style="P15">""" + rml_parser.formatLang(total_amount_night+0, currency_obj=company.currency_id) + """</para>
"""
# if not session_night_ids:
# rml +="""
# <para style="P15">-</para>
# """
#normal transaction
rml += """
</td>
</tr>
</blockTable>
<blockTable rowHeights="17" colWidths="198" style="Table3""" + to_xml(str(warehouse.color.name)) + """">
<tr>
<td background="pink">
<para style="P15_CENTER">CC and DC</para>
</td>
</tr>
</blockTable>
<blockTable colWidths="100,98" style="Table1_lines">
<tr>
<td>
"""
if not session_ids:
rml +="""
<para style="P15">-</para>
"""
session_list = []
bank_ids = []
for session in session_obj.browse(cr,uid,session_ids):
session_list.append(session.id)
# for bank in session.statement_ids:
# if bank.journal_id.type == 'bank':
# rml +="""
# <para style="P15">""" + to_xml(str(bank.journal_id.name)) + """</para>
# """
if len(session_list) == 1:
cr.execute(""" SELECT sum(abs.balance_end), aj.name from account_bank_statement abs inner join account_journal aj on abs.journal_id = aj.id where pos_session_id = %s and aj.type != 'cash' group by aj.name; """ % (tuple(session_list)[0],))
bank_ids = cr.fetchall()
if len(session_list) > 1:
cr.execute(""" SELECT sum(abs.balance_end), aj.name from account_bank_statement abs inner join account_journal aj on abs.journal_id = aj.id where pos_session_id in %s and aj.type != 'cash' group by aj.name; """ % (tuple(session_list),))
bank_ids = cr.fetchall()
if bank_ids:
for edukits_bank in bank_ids:
rml +="""
<para style="P15">""" + to_xml(str(edukits_bank[1])) + """</para>
"""
rml +="""
</td>
<td>
"""
if not session_ids:
rml +="""
<para style="P15">-</para>
"""
if bank_ids:
for edukits_bank in bank_ids:
total_bank_amount = 0
if edukits_bank[0]:
total_bank_amount = edukits_bank[0]
total_bank += edukits_bank[0]
rml +="""
<para style="P15">""" + rml_parser.formatLang(total_bank_amount+0,currency_obj=company.currency_id) + """</para>
"""
rml +="""
</td>
</tr>
</blockTable>
<blockTable rowHeights="17" colWidths="198" style="Table3""" + to_xml(str(warehouse.color.name)) + """">
<tr>
<td background="pink">
<para style="P15_CENTER">PENGELUARAN</para>
</td>
</tr>
</blockTable>
<blockTable colWidths="198" style="Table1_lines">
<tr>
<td background="pink">
<para style="P15_W">Table</para>
</td>
</tr>
</blockTable>
<blockTable colWidths="198" style="Table1_lines">
<tr>
<td background="pink">
<para style="P15_W">Table</para>
</td>
</tr>
</blockTable>
<blockTable colWidths="80,118" style="Table1_lines">
<tr>
<td>
<para style="P15">MAITRI</para>
</td>
<td>
<para style="P15_RIGHT"></para>
<para style="P15_RIGHT">""" + rml_parser.formatLang(total_amount +total_amount_night+ total_bank+0, currency_obj=company.currency_id) +"""</para>
</td>
</tr>
</blockTable>
<blockTable colWidths="80,118" style="Table1_lines">
<tr>
<td>
<para style="P15">KURS :""" + rml_parser.formatLang(currency_amount,) +"""</para>
</td>
<td>
<para style="P15_RIGHT">""" + rml_parser.formatLang( (total_amount+total_amount_night)*currency_amount, currency_obj=warehouse.currency_id) +"""</para>
</td>
</tr>
</blockTable>
<blockTable colWidths="80,5,110" style="Table3_LINE2">
<tr>
<td>
<para style="P15"></para>
</td>
<td>
<para style="P15"></para>
</td>
<td>
<para style="P15_CENTER"></para>
</td>
</tr>
</blockTable>
</td>
</tr>
</blockTable>
<spacer length="0.5cm"/>"""
rml += """
</td>
"""
if center:
if len(warehouse_ids) == 1:
rml += """<td></td>"""
rml += """
</tr>
"""
elif ( (no_total % 2 == 1 ) and (len(warehouse_ids)+1 == no_total)):
rml += """<td></td>"""
rml += """
</tr>
"""
elif no_total % 2 == 0:
rml += """
</tr>
"""
else:
if len(warehouse_ids)+1 == no_total:
rml += """
</tr>
"""
else:
if len(warehouse_ids) == 1:
rml += """<td></td>"""
rml += """
</tr>
"""
<|fim▁hole|> rml += """
</tr>
"""
elif no_total % 2 == 0:
rml += """
</tr>
"""
else:
if len(warehouse_ids) == no_total:
rml += """
</tr>
"""
no_total += 1
rml += """
</blockTable>
</story>
</document>"""
date_cur = time.strftime('%Y-%m-%d %H:%M:%S')
return rml
edukits_total_retail('report.edukits.total.retail', 'pos.session', '', '')<|fim▁end|> | elif ( (no_total % 2 == 1 ) and (len(warehouse_ids) == no_total)):
rml += """<td></td>""" |
<|file_name|>shadowed-argument.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print x
// gdb-check:$1 = false
// gdb-command:print y
// gdb-check:$2 = true
// gdb-command:continue
// gdb-command:print x
// gdb-check:$3 = 10
// gdb-command:print y
// gdb-check:$4 = true
// gdb-command:continue
// gdb-command:print x
// gdb-check:$5 = 10.5
// gdb-command:print y
// gdb-check:$6 = 20
// gdb-command:continue
<|fim▁hole|>// lldb-command:print x
// lldb-check:[...]$0 = false
// lldb-command:print y
// lldb-check:[...]$1 = true
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$2 = 10
// lldb-command:print y
// lldb-check:[...]$3 = true
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$4 = 10.5
// lldb-command:print y
// lldb-check:[...]$5 = 20
// lldb-command:continue
#![omit_gdb_pretty_printer_section]
fn a_function(x: bool, y: bool) {
zzz(); // #break
sentinel();
let x = 10;
zzz(); // #break
sentinel();
let x = 10.5f64;
let y = 20;
zzz(); // #break
sentinel();
}
fn main() {
a_function(false, true);
}
fn zzz() {()}
fn sentinel() {()}<|fim▁end|> | // === LLDB TESTS ==================================================================================
// lldb-command:run
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>
pub use self::tensor::{
Covector, InnerProduct, InvTwoForm, Matrix, Scalar, Tensor, TwoForm, Vector,
};
pub use self::variance::{
Concat, Contract, Contracted, ContravariantIndex, CovariantIndex, IndexType, Joined,
OtherIndex, TensorIndex, Variance,
};<|fim▁end|> | //! Module containing definitions of tensors and operations on them.
mod tensor;
mod variance; |
<|file_name|>annotateable.rs<|end_file_name|><|fim▁begin|>//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use libimagstore::store::Entry;
use libimagstore::store::FileLockEntry;
use libimagstore::store::Store;
use libimagstore::storeid::IntoStoreId;
use libimagstore::storeid::StoreIdIterator;
use libimagentrylink::internal::InternalLinker;
use libimagentryutil::isa::Is;
use libimagentryutil::isa::IsKindHeaderPathProvider;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use error::Result;
use error::AnnotationErrorKind as AEK;
use error::AnnotationError as AE;
use error::ResultExt;
<|fim▁hole|> fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>>;
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>>;
fn is_annotation(&self) -> Result<bool>;
}
provide_kindflag_path!(IsAnnotation, "annotation.is_annotation");
impl Annotateable for Entry {
/// Annotate an entry, returns the new entry which is used to annotate
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>> {
use module_path::ModuleEntryPath;
store.retrieve(ModuleEntryPath::new(ann_name).into_storeid()?)
.map_err(From::from)
.and_then(|mut anno| {
{
let _ = anno.set_isflag::<IsAnnotation>()?;
let _ = anno
.get_header_mut()
.insert("annotation.name", Value::String(String::from(ann_name)))?;
}
Ok(anno)
})
.and_then(|mut anno| {
anno.add_internal_link(self)
.chain_err(|| AEK::LinkingError)
.map(|_| anno)
})
}
/// Checks the current entry for all annotations and removes the one where the name is
/// `ann_name`, which is then returned
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>> {
for annotation in self.annotations(store)? {
let mut anno = annotation?;
let name = match anno.get_header().read("annotation.name")? {
None => continue,
Some(val) => match *val {
Value::String(ref name) => name.clone(),
_ => return Err(AE::from_kind(AEK::HeaderTypeError)),
},
};
if name == ann_name {
let _ = self.remove_internal_link(&mut anno)?;
return Ok(Some(anno));
}
}
Ok(None)
}
/// Get all annotations of an entry
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>> {
self.get_internal_links()
.map_err(From::from)
.map(|iter| StoreIdIterator::new(Box::new(iter.map(|e| e.get_store_id().clone()))))
.map(|i| AnnotationIter::new(i, store))
}
fn is_annotation(&self) -> Result<bool> {
self.is::<IsAnnotation>().map_err(From::from)
}
}<|fim▁end|> | use iter::*;
pub trait Annotateable {
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>>; |
<|file_name|>deploy_gcf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script for deploying cloud functions."""
from __future__ import print_function
import subprocess
import sys
from turbinia import config
index_file = './index.yaml'
if len(sys.argv) > 1:
function_names = [sys.argv[1]]
else:
function_names = ['gettasks', 'closetasks']
config.LoadConfig()
for cloud_function in function_names:
print('Deploying function {0:s}'.format(cloud_function))
cmd = (
'gcloud --project {0:s} functions deploy {1:s} --stage-bucket {2:s} '
'--region {3:s} --runtime nodejs14 --trigger-http --memory 256MB '<|fim▁hole|> print(subprocess.check_call(cmd, shell=True))
print('\nCreating Datastore index from {0:s}'.format(index_file))
cmd = 'gcloud --quiet --project {0:s} datastore indexes create {1:s}'.format(
config.TURBINIA_PROJECT, index_file)
subprocess.check_call(cmd, shell=True)<|fim▁end|> | '--timeout 60s'.format(
config.TURBINIA_PROJECT, cloud_function, config.BUCKET_NAME,
config.TURBINIA_REGION)) |
<|file_name|>interpolators.py<|end_file_name|><|fim▁begin|># Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This module defines interpolators for the common OSs.
Globs and Artifacts may expand interpolations from the KnowledgeBase. This
module provides a live, on demand, KnowledgeBase.
"""
from builtins import object
import os
import re
import platform
from rekall import kb
from rekall_lib import registry
class KnowledgeBase(object):
def __init__(self, session):
self.session = session
def expand(self, variable):
return []
class LinuxKnowledgeBase(KnowledgeBase):
@registry.memoize
def _get_users_homedir(self):
homedirs = []
for user in open("/etc/passwd"):
user = user.strip()
homedirs.append(user.split(":")[5])
return homedirs
def expand(self, variable):
if variable == "%%users.homedir%%":
return self._get_users_homedir()
self.session.logging.warn("Unable to interpolate %s", variable)
return []
class WindowsKnowledgeBase(KnowledgeBase):
@registry.memoize
def _get_sids(self):
result = []
for hit in self.session.plugins.glob(
r"HKEY_USERS\*", filesystem="Reg", root="\\",
path_sep="\\").collect():
path = hit["path"]
m = re.search(
r"(S-(\d+-)+\d+)$", path.filename.name or "", re.I)
if m:
result.append(m.group(1))
return result
@registry.memoize
def _get_homedirs(self):
"""On windows the homedirs are the paths of the user's profile."""
result = []
for artifact_hit in self.session.plugins.artifact_collector(
"WindowsRegistryProfiles"):
for hit_result in artifact_hit.get("result", []):<|fim▁hole|> if profile_path:
result.append(profile_path)
return result
def expand(self, variable):
if variable == "%%users.sid%%":
return self._get_sids()
if variable == "%%users.homedir%%":
return self._get_homedirs()
if variable == "%%environ_systemroot%%":
return [os.environ["systemroot"]]
return []
class KnowledgeBaseHook(kb.ParameterHook):
name = "knowledge_base"
def calculate(self):
if platform.system() == "Linux":
return LinuxKnowledgeBase(self.session)
elif platform.system() == "Windows":
return WindowsKnowledgeBase(self.session)<|fim▁end|> | profile_path = hit_result.get("value") |
<|file_name|>DatePicker.js<|end_file_name|><|fim▁begin|>'use strict';
var React = require('react');
var classNames = require('classnames');
var fecha = require('fecha');
var ClassNameMixin = require('./mixins/ClassNameMixin');
var dateUtils = require('./utils/dateUtils');
var Icon = require('./Icon');
var DatePicker = React.createClass({
mixins: [ClassNameMixin],
propTypes: {
onSelect: React.PropTypes.func.isRequired,
onClose: React.PropTypes.func,
getWidget: React.PropTypes.func,
onSubtractMonth: React.PropTypes.func,
onAddMonth: React.PropTypes.func,
viewMode: React.PropTypes.string,
minViewMode: React.PropTypes.string,
daysOfWeekDisabled: React.PropTypes.array,
format: React.PropTypes.string,
date: React.PropTypes.object,
weekStart: React.PropTypes.number,
minDate: React.PropTypes.string,
maxDate: React.PropTypes.string,
locale: React.PropTypes.string
},
getDefaultProps: function() {
return {
classPrefix: 'datepicker',
date: new Date(),
daysOfWeekDisabled: [],
viewMode: 'days',
minViewMode: 'days',
format: 'YYYY-MM-DD',
displayed: {
days: {display: 'block'},
months: {display: 'none'},
years: {display: 'none'}
}
};
},
getInitialState: function() {
var displayed;
switch (this.props.viewMode) {
case 'days':
displayed = {
days: {display: 'block'},
months: {display: 'none'},
years: {display: 'none'}
};
break;
case 'months':
displayed = {
days: {display: 'none'},
months: {display: 'block'},
years: {display: 'none'}
};
break;
case 'years':
displayed = {
days: {display: 'none'},
months: {display: 'none'},
years: {display: 'block'}
};
break;
}
return {
locale: dateUtils.getLocale(this.props.locale),
viewDate: this.props.date,
selectedDate: this.props.date,
displayed: displayed
};
},
// DaysPicker props function
subtractMonth: function() {
const { prevLoading, viewDate } = this.state;
if(prevLoading){
return;
}
var newDate = new Date(viewDate.valueOf());
newDate.setMonth(viewDate.getMonth() - 1);
const { onSubtractMonth } = this.props;
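    // When the host passes onSubtractMonth, show the loading spinner and wait
    // for its callback before switching the displayed month.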
if(onSubtractMonth) {
this.setState({
prevLoading: true
});
onSubtractMonth(newDate, () => {
this.setState({
viewDate: newDate,
prevLoading: false
});
});
}
else{
this.setState({
viewDate: newDate
});
}
},
addMonth: function() {
const { nextLoadingIcon, viewDate } = this.state;
if(nextLoadingIcon){
return;
}
var newDate = new Date(viewDate.valueOf());
newDate.setMonth(viewDate.getMonth() + 1);
const { onAddMonth } = this.props;
if(onAddMonth) {
this.setState({
nextLoading: true
});
onAddMonth(newDate, () => {
this.setState({
viewDate: newDate,
nextLoading: false
});
});
}
else{
this.setState({
viewDate: newDate
});
}
},
setSelectedDate: function(params) {
const { className, date } = params;
if (/disabled|new|old/ig.test(className)) {
return;
}
var viewDate = this.state.viewDate;
//if (/new/ig.test(className)) {
// viewDate.setMonth(viewDate.getMonth() + 1);
//} else if (/old/ig.test(className)) {
// viewDate.setMonth(viewDate.getMonth() - 1);
//}
viewDate.setDate(date);
this.setViewDate(viewDate);
},
setViewDate: function(viewDate) {
this.setState({
viewDate: viewDate,
selectedDate: new Date(viewDate.valueOf())
}, function() {
this.props.onSelect(this.state.selectedDate);
this.props.onClose && this.props.onClose();
});
},
showMonths: function() {
return this.setState({
displayed: {
days: {display: 'none'},
months: {display: 'block'},
years: {display: 'none'}
}
});
},
// MonthsPicker props function
subtractYear: function() {
var viewDate = this.state.viewDate;
var newDate = new Date(viewDate.valueOf());
newDate.setFullYear(viewDate.getFullYear() - 1);
return this.setState({
viewDate: newDate
});
},
addYear: function() {
var viewDate = this.state.viewDate;
var newDate = new Date(viewDate.valueOf());
newDate.setFullYear(viewDate.getFullYear() + 1);<|fim▁hole|> });
},
showYears: function() {
return this.setState({
displayed: {
days: {display: 'none'},
months: {display: 'none'},
years: {display: 'block'}
}
});
},
setViewMonth: function(event) {
var viewDate = this.state.viewDate;
var month = event.target.innerHTML;
var months = this.state.locale.monthsShort;
var i = 0;
var len = months.length;
for (; i < len; i++) {
if (month === months[i]) {
viewDate.setMonth(i);
}
}
if (this.props.minViewMode === 'months') {
this.setViewDate(viewDate);
}
this.setState({
viewDate: viewDate,
displayed: {
days: {display: 'block'},
months: {display: 'none'},
years: {display: 'none'}
}
});
},
// YearsPicker props function
setViewYear: function(event) {
var year = event.target.innerHTML;
var viewDate = this.state.viewDate;
viewDate.setFullYear(year);
if (this.props.minViewMode === 'years') {
this.setViewDate(viewDate);
}
this.setState({
viewDate: viewDate,
displayed: {
days: {display: 'none'},
months: {display: 'block'},
years: {display: 'none'}
}
});
},
addDecade: function() {
var viewDate = this.state.viewDate;
var newDate = new Date(viewDate.valueOf());
newDate.setFullYear(viewDate.getFullYear() + 10);
this.setState({
viewDate: newDate
});
},
subtractDecade: function() {
var viewDate = this.state.viewDate;
var newDate = new Date(viewDate.valueOf());
newDate.setFullYear(viewDate.getFullYear() - 10);
this.setState({
viewDate: newDate
});
},
// render children
renderDays: function() {
return (
<DaysPicker
{...this.state}
subtractMonth={this.subtractMonth}
addMonth={this.addMonth}
setSelectedDate={this.setSelectedDate}
showMonths={this.showMonths}
getWidget={this.props.getWidget}
weekStart={this.props.weekStart}
daysOfWeekDisabled={this.props.daysOfWeekDisabled}
minDate={this.props.minDate}
maxDate={this.props.maxDate}
/>
);
},
renderMonths: function() {
return (
<MonthsPicker
style={this.state.displayed.months}
locale={this.state.locale}
addYear={this.addYear}
subtractYear={this.subtractYear}
viewDate={this.state.viewDate}
selectedDate={this.state.selectedDate}
showYears={this.showYears}
setViewMonth={this.setViewMonth} />
);
},
renderYears: function() {
return (
<YearsPicker
style={this.state.displayed.years}
viewDate={this.state.viewDate}
selectDate={this.state.selectedDate}
setViewYear={this.setViewYear}
addDecade={this.addDecade}
subtractDecade={this.subtractDecade} />
);
},
render: function() {
return (
<div className={this.prefixClass('body')}>
{this.renderDays()}
{this.renderMonths()}
{this.renderYears()}
</div>
);
}
});
var DaysPicker = React.createClass({
mixins: [ClassNameMixin],
//propTypes: {
// subtractMonth: React.PropTypes.func.isRequired,
// addMonth: React.PropTypes.func.isRequired,
//
// setSelectedDate: React.PropTypes.func.isRequired,
// selectedDate: React.PropTypes.object.isRequired,
//
// viewDate: React.PropTypes.object.isRequired,
// showMonths: React.PropTypes.func.isRequired,
//
// locale: React.PropTypes.object,
// weekStart: React.PropTypes.number,
// daysOfWeekDisabled: React.PropTypes.array,
// minDate: React.PropTypes.string,
// maxDate: React.PropTypes.string
//},
getInitialState: function() {
return {
prevLoading: this.props.prevLoading,
nextLoading: this.props.nextLoading
};
},
getDefaultProps: function() {
return {
classPrefix: 'datepicker'
};
},
renderDays: function(getWidget = ({year, month, date}) => date) {
var row;
var i;
var _ref;
var _i;
var _len;
var prevY;
var prevM;
var classes = {};
var html = [];
var cells = [];
var weekStart = this.props.weekStart || this.props.locale.weekStart;
var weekEnd = ((weekStart + 6) % 7);
var d = this.props.viewDate;
var year = d.getFullYear();
var month = d.getMonth();
var selectedDate = this.props.selectedDate;
var currentDate = new Date(selectedDate.getFullYear(), selectedDate.getMonth(), selectedDate.getDate(), 0, 0, 0, 0).valueOf();
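    // Build a fixed 6-week (42 cell) grid: start from the tail of the previous
    // month aligned to the configured week start, then advance one day per cell.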
var prevMonth = new Date(year, month - 1, 28, 0, 0, 0, 0);
var day = dateUtils.getDaysInMonth(prevMonth.getFullYear(), prevMonth.getMonth());
prevMonth.setDate(day);
prevMonth.setDate(day - (prevMonth.getDay() - weekStart + 7) % 7);
var nextMonth = new Date(prevMonth);
nextMonth.setDate(nextMonth.getDate() + 42);
nextMonth = nextMonth.valueOf();
var minDate = this.props.minDate && fecha.parse(this.props.minDate);
var maxDate = this.props.maxDate && fecha.parse(this.props.maxDate);
while (prevMonth.valueOf() < nextMonth) {
classes[this.prefixClass('day')] = true;
prevY = prevMonth.getFullYear();
prevM = prevMonth.getMonth();
// set className old new
if ((prevM < month && prevY === year) || prevY < year) {
classes[this.prefixClass('old')] = true;
} else if ((prevM > month && prevY === year) || prevY > year) {
classes[this.prefixClass('new')] = true;
}
// set className active
if (prevMonth.valueOf() === currentDate) {
classes[this.setClassNamespace('active')] = true;
}
// set className disabled
if ((minDate && prevMonth.valueOf() < minDate)
|| (maxDate && prevMonth.valueOf() > maxDate)) {
classes[this.setClassNamespace('disabled')] = true;
}
// week disabled
if (this.props.daysOfWeekDisabled) {
_ref = this.props.daysOfWeekDisabled;
for (_i = 0, _len = _ref.length; _i < _len; _i++) {
i = _ref[_i];
if (prevMonth.getDay() === this.props.daysOfWeekDisabled[i]) {
classes[this.setClassNamespace('disabled')] = true;
break;
}
}
}
let date = prevMonth.getDate();
let className = classNames(classes);
cells.push(
<td
key={prevMonth.getMonth() + '-' + date}
className={className}
onClick={() => this.props.setSelectedDate({
className,
date
})}>
{getWidget({
year: d.getFullYear(),
month: prevMonth.getMonth(),
date
})}
</td>
);
// add tr
if (prevMonth.getDay() === weekEnd) {
row = <tr key={prevMonth.getMonth() + '-' + prevMonth.getDate()}>{cells}</tr>;
html.push(row);
cells = [];
}
classes = {};
prevMonth.setDate(prevMonth.getDate() + 1);
}
return html;
},
renderWeek: function() {
var ths = [];
var locale = this.props.locale;
var weekStart = this.props.weekStart || this.props.locale.weekStart;
var weekEnd = weekStart + 7;
while (weekStart < weekEnd) {
ths.push(
<th key={weekStart} className={this.prefixClass('dow')}>
{locale.daysMin[weekStart++ % 7]}
</th>
);
}
return (
<tr>
{ths}
</tr>
);
},
componentWillReceiveProps: function(nextProps) {
const { prevLoading, nextLoading } = nextProps;
this.setState({
prevLoading,
nextLoading
})
},
render: function() {
var prefixClass = this.prefixClass;
var { viewDate, locale, getWidget } = this.props;
return (
<div
className={prefixClass('days')}
style={this.props.displayed.days}>
<table className={prefixClass('table')}>
<thead>
<tr className={prefixClass('header')}>
<th className={prefixClass('prev')} onClick={this.props.subtractMonth}>
{
this.state.prevLoading ?
<Icon spin icon="circle-o-notch" />
:
<i className={prefixClass('prev-icon')}></i>
}
</th>
<th
className={prefixClass('switch')}
colSpan="5"
onClick={this.props.showMonths}>
<div className={this.prefixClass('select')}>
{locale.monthsShort[viewDate.getMonth()]}
{viewDate.getFullYear()}
</div>
</th>
<th className={prefixClass('next')} onClick={this.props.addMonth}>
{
this.state.nextLoading ?
<Icon spin icon="circle-o-notch" />
:
<i className={prefixClass('next-icon')}></i>
}
</th>
</tr>
{this.renderWeek()}
</thead>
<tbody>
{this.renderDays(getWidget)}
</tbody>
</table>
</div>
);
}
});
var MonthsPicker = React.createClass({
mixins: [ClassNameMixin],
propTypes: {
locale: React.PropTypes.object,
subtractYear: React.PropTypes.func.isRequired,
addYear: React.PropTypes.func.isRequired,
viewDate: React.PropTypes.object.isRequired,
selectedDate: React.PropTypes.object.isRequired,
showYears: React.PropTypes.func.isRequired,
setViewMonth: React.PropTypes.func.isRequired,
minDate: React.PropTypes.string,
maxDate: React.PropTypes.string
},
getDefaultProps: function() {
return {
classPrefix: 'datepicker'
};
},
renderMonths: function() {
var classes = {};
var month = this.props.selectedDate.getMonth();
var year = this.props.selectedDate.getFullYear();
var i = 0;
var months = [];
var minDate = this.props.minDate && fecha.parse(this.props.minDate);
var maxDate = this.props.maxDate && fecha.parse(this.props.maxDate);
var prevMonth = new Date(year, month);
// TODO: minDate maxDate months
while (i < 12) {
classes[this.prefixClass('month')] = true;
if (this.props.viewDate.getFullYear() ===
this.props.selectedDate.getFullYear()
&& i === month) {
classes[this.setClassNamespace('active')] = true;
}
// set className disabled
if ((minDate && prevMonth.valueOf() < minDate)
|| (maxDate && prevMonth.valueOf() > maxDate)) {
classes[this.setClassNamespace('disabled')] = true;
}
months.push(
<span
className={classNames(classes)}
onClick={this.props.setViewMonth}
key={i}>
{this.props.locale.monthsShort[i]}
</span>
);
classes = {};
i++;
}
return months;
},
render: function() {
return (
<SubPicker
displayName="months"
style={this.props.style}
subtract={this.props.subtractYear}
add={this.props.addYear}
showFunc={this.props.showYears}
showText={this.props.viewDate.getFullYear()}
body={this.renderMonths()} />
);
}
});
var YearsPicker = React.createClass({
mixins: [ClassNameMixin],
propTypes: {
viewDate: React.PropTypes.object.isRequired,
selectDate: React.PropTypes.object.isRequired,
subtractDecade: React.PropTypes.func.isRequired,
addDecade: React.PropTypes.func.isRequired,
setViewYear: React.PropTypes.func.isRequired
},
getDefaultProps: function() {
return {
classPrefix: 'datepicker'
};
},
renderYears: function() {
var classes = {};
var years = [];
var i = -1;
var year = parseInt(this.props.viewDate.getFullYear() / 10, 10) * 10;
year--;
while (i < 11) {
classes[this.prefixClass('year')] = true;
if (i === -1 || i === 10) {
classes[this.prefixClass('old')] = true;
}
if (this.props.selectDate.getFullYear() === year) {
classes[this.setClassNamespace('active')] = true;
}
years.push(
<span
className={classNames(classes)}
onClick={this.props.setViewYear}
key={year}>
{year}
</span>
);
classes = {};
year++;
i++;
}
return years;
},
render: function() {
var year = parseInt(this.props.viewDate.getFullYear() / 10, 10) * 10;
var addYear = year + 9;
var showYear = year + '-' + addYear;
return (
<SubPicker
displayName="years"
style={this.props.style}
subtract={this.props.subtractDecade}
add={this.props.addDecade}
showText={showYear}
body={this.renderYears()} />
);
}
});
var SubPicker = React.createClass({
mixins: [ClassNameMixin],
getDefaultProps: function() {
return {
classPrefix: 'datepicker'
};
},
render: function() {
var prefixClass = this.prefixClass;
return (
<div
className={prefixClass(this.props.displayName)}
style={this.props.style}>
<table className={prefixClass('table')}>
<thead>
<tr className={prefixClass('header')}>
<th className={prefixClass('prev')} onClick={this.props.subtract}>
<i className={prefixClass('prev-icon')}></i>
</th>
<th
className={prefixClass('switch')}
colSpan="5"
onClick={this.props.showFunc}>
<div className={this.prefixClass('select')}>
{this.props.showText}
</div>
</th>
<th className={prefixClass('next')} onClick={this.props.add}>
<i className={prefixClass('next-icon')}></i>
</th>
</tr>
</thead>
<tbody>
<tr>
<td colSpan="7">
{this.props.body}
</td>
</tr>
</tbody>
</table>
</div>
);
}
});
module.exports = DatePicker;<|fim▁end|> | return this.setState({
viewDate: newDate |
<|file_name|>test_blacklist_model.py<|end_file_name|><|fim▁begin|>"""
Module for testing the Blacklist model
"""
import unittest
from app.models.shopping import BlacklistToken, ShoppingList
try:
from .common_functions import BaseModelTestClass
except (ImportError, SystemError):
from common_functions import BaseModelTestClass
class BlacklistModelTest(BaseModelTestClass):
"""
Handles the testing for the Blacklist model class
"""
def blacklist_token(self, token=None):
"""
Helper method to blacklist a token
"""
with self.app.app_context():
if not token:
# create token from default user
token = self.user.generate_token(self.user.id)
# blacklist token
try:
blacklisted_token = BlacklistToken(token=token)
# putting save() commits to session and closes the session
return blacklisted_token<|fim▁hole|> """
A token can be blacklisted
"""
with self.app.app_context():
blacklisted_token = self.blacklist_token()
blacklisted_token.save() # save in the same session
from_database = BlacklistToken.query.get(blacklisted_token.id)
self.assertEqual(blacklisted_token.token, from_database.token)
def test_token_is_string_or_bytes(self):
"""
The token must be a string
"""
with self.app.app_context():
# try blacklisting a token that is an int
self.assertRaises(TypeError, self.blacklist_token, token=76)
self.assertRaises(TypeError, self.blacklist_token, token=True)
self.assertRaises(TypeError, self.blacklist_token, token=
{'token':'should be string or bytes'})
def test_token_can_be_searched_for(self):
"""
Blacklisted token can be searched for and found
"""
with self.app.app_context():
blacklisted_token = self.blacklist_token()
blacklisted_token.save() # save in the same session
self.assertTrue(BlacklistToken.check_blacklist(blacklisted_token.token))
def test_only_valid_tokens_allowed(self):
"""
Only valid tokens should be blacklisted
"""
with self.app.app_context():
# try blacklisting a token that is an int
self.assertRaises(ValueError, self.blacklist_token,
token='some random string')
self.assertRaises(ValueError, self.blacklist_token,
token='some random string to be converted to bytes'.encode('utf-8'))<|fim▁end|> | except Exception as e:
raise e
def test_token_can_be_blacklisted(self): |
<|file_name|>topo.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel, info, debug
from mininet.node import Host, RemoteController, OVSSwitch
# Must exist and be owned by quagga user (quagga:quagga by default on Ubuntu)
QUAGGA_RUN_DIR = '/var/run/quagga'
QCONFIG_DIR = 'configs'
ZCONFIG_DIR = 'configs'
<|fim▁hole|>
self.route = route
def config(self, **kwargs):
Host.config(self, **kwargs)
debug("configuring route %s" % self.route)
self.cmd('ip route add default via %s' % self.route)
class Router(Host):
def __init__(self, name, quaggaConfFile, zebraConfFile, intfDict, *args, **kwargs):
Host.__init__(self, name, *args, **kwargs)
self.quaggaConfFile = quaggaConfFile
self.zebraConfFile = zebraConfFile
self.intfDict = intfDict
def config(self, **kwargs):
Host.config(self, **kwargs)
self.cmd('sysctl net.ipv4.ip_forward=1')
for intf, attrs in self.intfDict.items():
self.cmd('ip addr flush dev %s' % intf)
# setup mac address to specific interface
if 'mac' in attrs:
self.cmd('ip link set %s down' % intf)
self.cmd('ip link set %s address %s' % (intf, attrs['mac']))
self.cmd('ip link set %s up ' % intf)
# setup address to interfaces
for addr in attrs['ipAddrs']:
self.cmd('ip addr add %s dev %s' % (addr, intf))
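        # Launch one zebra and bgpd instance per router, embedding the router
        # name in the pid and API socket files so multiple routers can coexist.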
self.cmd('zebra -d -f %s -z %s/zebra%s.api -i %s/zebra%s.pid' % (self.zebraConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))
self.cmd('bgpd -d -f %s -z %s/zebra%s.api -i %s/bgpd%s.pid' % (self.quaggaConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))
def terminate(self):
self.cmd("ps ax | egrep 'bgpd%s.pid|zebra%s.pid' | awk '{print $1}' | xargs kill" % (self.name, self.name))
Host.terminate(self)
class SdnIpTopo(Topo):
def build(self):
zebraConf = '{}/zebra.conf'.format(ZCONFIG_DIR)
s1 = self.addSwitch('s1', dpid='0000000000000001', cls=OVSSwitch, failMode="standalone")
# Quagga 1
bgpEth0 = {
'mac': '00:00:00:00:00:01',
'ipAddrs': [
'10.0.1.1/24',
]
}
bgpIntfs = {
'bgpq1-eth0': bgpEth0
}
bgpq1 = self.addHost("bgpq1", cls=Router,
quaggaConfFile='{}/quagga1.conf'.format(QCONFIG_DIR),
zebraConfFile=zebraConf,
intfDict=bgpIntfs)
self.addLink(bgpq1, s1)
# Quagga 2
bgpEth0 = {
'mac': '00:00:00:00:00:02',
'ipAddrs': [
'10.0.2.1/24',
]
}
bgpIntfs = {
'bgpq2-eth0': bgpEth0
}
bgpq2 = self.addHost("bgpq2", cls=Router,
quaggaConfFile='{}/quagga2.conf'.format(QCONFIG_DIR),
zebraConfFile=zebraConf,
intfDict=bgpIntfs)
self.addLink(bgpq2, s1)
topos = {'sdnip': SdnIpTopo}
if __name__ == '__main__':
setLogLevel('debug')
topo = SdnIpTopo()
net = Mininet(topo=topo, controller=RemoteController)
net.start()
CLI(net)
net.stop()
info("done\n")<|fim▁end|> | class SdnIpHost(Host):
def __init__(self, name, ip, route, *args, **kwargs):
Host.__init__(self, name, ip=ip, *args, **kwargs) |
<|file_name|>problem_28.cpp<|end_file_name|><|fim▁begin|>#include "problem_28.hpp"
#include <iostream>
using namespace std;
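// For an odd ring of side d in the spiral, the top-right corner is d*d and each
// remaining corner is (d - 1) less than the previous, so the four corners sum
// to 4*d*d - 6*(d - 1).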
int sum_for_corners_in_square(int side) {
return 4*side*side - 6*(side - 1);
}
TEST(problem_28, full) {
int sum(1);
<|fim▁hole|> EXPECT_EQ(669171001, sum);
}<|fim▁end|> | for (int d = 3; d <= 1001; d+=2) {
sum += sum_for_corners_in_square(d);
}
|
<|file_name|>cron.py<|end_file_name|><|fim▁begin|>import kronos
import random
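# kronos runs this job once a day at midnight ('0 0 * * *').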
@kronos.register('0 0 * * *')
def complain():
complaints = [
"I forgot to migrate our applications's cron jobs to our new server! Darn!",<|fim▁hole|><|fim▁end|> | "I'm out of complaints! Damnit!"
]
print random.choice(complaints) |
<|file_name|>issue-11820.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
struct NoClone;
fn main() {
let rnc = &NoClone;
let rsnc = &Some(NoClone);
let _: &NoClone = rnc.clone();
let _: &Option<NoClone> = rsnc.clone();<|fim▁hole|><|fim▁end|> | } |
<|file_name|>issue-10392.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
<|fim▁hole|>
fn a() -> A { panic!() }
fn main() {
let A { , } = a(); //~ ERROR: expected ident
}<|fim▁end|> | struct A { foo: isize } |
<|file_name|>one.rs<|end_file_name|><|fim▁begin|>pub fn run() {
let sum: u32 = (1..1000).map(|i|
match (i % 3, i % 5) {<|fim▁hole|> _ => 0,
}
).sum();
println!("Sum: {}", sum);
}<|fim▁end|> | (0, 0) => i,
(0, _) => i,
(_, 0) => i, |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>__all__ = [
"CorruptedMessage",
"Stats",
"UnexpectedCommand",
"UnexpectedEOF",
"UnknownCommand",
"log",
]
import logging
import struct
log = logging.getLogger("offhand")
class UnexpectedEOF(Exception):<|fim▁hole|>
class UnknownCommand(Exception):
def __init__(self, command):
Exception.__init__(self, "Unknown command: %r" % command)
class UnexpectedCommand(Exception):
def __init__(self, command):
Exception.__init__(self, "Unexpected command: %r" % command)
class CorruptedMessage(Exception):
def __init__(self):
Exception.__init__(self, "Corrupted message")
class Stats(object):
__slots__ = [
"connecting",
"connected",
"idle",
"busy",
"total_engaged",
"total_canceled",
"total_rolledback",
"total_timeouts",
"total_disconnects",
"total_errors",
]
def __init__(self, copy=None):
for key in self.__slots__:
setattr(self, key, getattr(copy, key) if copy else 0)
def __nonzero__(self):
return any(getattr(self, key) for key in self.__slots__)
def __str__(self):
return " ".join("%s=%s" % (key, getattr(self, key)) for key in self.__slots__)
def parse_message(data):
message = []
offset = 0
while True:
remain = len(data) - offset
if remain == 0:
break
if remain < 4:
raise CorruptedMessage()
part_size, = struct.unpack("<I", data[offset: offset + 4])
offset += 4
if remain < 4 + part_size:
raise CorruptedMessage()
message.append(data[offset: offset + part_size])
offset += part_size
return message<|fim▁end|> |
def __init__(self):
Exception.__init__(self, "Connection closed unexpectedly")
|
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"github.com/boxofrox/cctv-ptz/config"
"github.com/docopt/docopt-go"
"github.com/mikepb/go-serial"
"github.com/simulatedsimian/joystick"
"io"
"math"
"os"
"strconv"
"strings"
"syscall"
"time"
)
var (
VERSION string
BUILD_DATE string
)
// pelco d byte names
const (
SYNC = 0
ADDR = 1
COMMAND_1 = 2
COMMAND_2 = 3
DATA_1 = 4
DATA_2 = 5
CHECKSUM = 6
)
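// A Pelco-D frame is 7 bytes: sync (0xFF), camera address, two command bytes,
// two data bytes, and a checksum (conventionally the low byte of the sum of
// bytes 2-6).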
type PelcoDMessage [7]byte
type DelayedMessage struct {
Message PelcoDMessage
Delay time.Duration
}
const (
AxisMax = 32767
MaxSpeed = 0x3f
)
type Axis struct {
Index int32
Min int32 // used for normalizing input -1.0 to 1.0
Max int32
Deadzone int32
Inverted bool // flips normalized input
}
var xbox = struct {
LeftAxisX Axis
LeftAxisY Axis
RightAxisX Axis
RightAxisY Axis
LeftTrigger Axis
RightTrigger Axis
DPadX Axis
DPadY Axis
LeftBumper uint32
RightBumper uint32
A uint32
B uint32
X uint32
Y uint32
Start uint32
Back uint32
XBox uint32
}{
Axis{0, -AxisMax, AxisMax, 8192, false}, // left analog stick
Axis{1, -AxisMax, AxisMax, 8192, true},
Axis{3, -AxisMax, AxisMax, 8192, false}, // right analog stick
Axis{4, -AxisMax, AxisMax, 8192, true},
Axis{2, -AxisMax, AxisMax, 1000, false}, // triggers
Axis{5, -AxisMax, AxisMax, 1000, false},
Axis{6, -AxisMax, AxisMax, 1000, false}, // directional pad
Axis{7, -AxisMax, AxisMax, 1000, false},
1 << 4, // bumpers
1 << 5,
1 << 0, // A
1 << 1, // B
1 << 2, // X
1 << 3, // Y
1 << 7, // start
1 << 6, // back
1 << 8, // xbox button
}
// map xbox controller to pan-tilt-zoom controls and misc app controls
var ptz = struct {
// pan tilt zoom
PanX Axis
PanY Axis
ZoomIn uint32
ZoomOut uint32
OpenIris uint32
CloseIris uint32
OpenMenu uint32
// misc
IncPelcoAddr uint32
DecPelcoAddr uint32
ResetTimer uint32
MarkLeft Axis
MarkRight Axis
}{
xbox.LeftAxisX, // pan x
xbox.RightAxisY, // pan y
xbox.LeftBumper, // zoom in
xbox.RightBumper, // zoom out
xbox.A, // open iris (enter)
xbox.B, // close iris
xbox.Start, // open menu
xbox.Y, // increment pelco address
xbox.X, // decrement pelco address
xbox.Back, // reset timer
xbox.LeftTrigger, // mark
xbox.RightTrigger, // mark
}
func main() {
var (
err error
arguments map[string]interface{}
)
usage := `CCTV Pan-Tilt-Zoom via Xbox Controller
Usage:
cctv-ptz [-v] [-a ADDRESS] [-s FILE] [-j JOYSTICK] [-r FILE] [-b BAUD] [-m MAXSPEED]
cctv-ptz playback [-a ADDRESS] [-s FILE] [-b BAUD] [-v]
cctv-ptz -h
cctv-ptz -V
Options:
-a, --address ADDRESS - Pelco-D address 0-256. (default = 0)
-b, --baud BAUD - set baud rate of serial port. (default = 9600)
-j, --joystick JOYSTICK - use joystick NUM (e.g. /dev/input/jsNUM). (default = 0)
-m, --maxspeed MAXSPEED - set max speed setting 0-100. (default = 100)
-s, --serial FILE - assign serial port for rs485 output. (default = /dev/sttyUSB0)
-r, --record FILE - record rs485 commands to file. (default = /dev/null)
-v, --verbose - prints Pelco-D commands to stdout.
-h, --help - print this help message.
-V, --version - print version info.
`
arguments, err = docopt.Parse(usage, nil, true, version(), false)
// fail if arguments failed to parse
if err != nil {
panic(err)
}
conf := config.Load(arguments)
if arguments["playback"].(bool) {
playback(conf)
} else {
interactive(conf)
}
}
func createSerialOptions(conf config.Config) serial.Options {
return serial.Options{
Mode: serial.MODE_WRITE,
BitRate: conf.BaudRate,
DataBits: 8,
StopBits: 1,
Parity: serial.PARITY_NONE,
FlowControl: serial.FLOWCONTROL_NONE,
}
}
func decodeMessage(text string) (PelcoDMessage, error) {
var (
bytes []byte
err error
)
message := PelcoDMessage{}
if bytes, err = hex.DecodeString(text); err != nil {
return message, err
}
copy(message[:], bytes)
return message, nil
}
func interactive(conf config.Config) {
var (
record *os.File
tty *serial.Port
jsObserver <-chan joystick.State
err error
resetTimer = true
serialEnabled = ("/dev/null" != conf.SerialPort)
hasSerialAccess bool
)
stdinObserver := listenFile(os.Stdin)
js, err := joystick.Open(conf.JoystickNumber)
if err != nil {
fmt.Fprintf(os.Stderr, "cctv-ptz: error opening joystick %d. %s\n", conf.JoystickNumber, err)
jsObserver = listenNothing()
} else {
defer js.Close()
fmt.Fprintf(os.Stderr, "Joystick port opened. /dev/input/js%d\n", conf.JoystickNumber)
fmt.Fprintf(os.Stderr, " Joystick Name: %s\n", js.Name())
fmt.Fprintf(os.Stderr, " Axis Count: %d\n", js.AxisCount())
fmt.Fprintf(os.Stderr, " Button Count: %d\n", js.ButtonCount())
jsTicker := time.NewTicker(100 * time.Millisecond)
jsObserver = listenJoystick(js, jsTicker)
}
hasSerialAccess, err = serialPortAvailable(conf.SerialPort)
if err != nil {
fmt.Fprintf(os.Stderr, "cctv-ptz: cannot open serial port (%s). %s\n", conf.SerialPort, err)
}
if serialEnabled && hasSerialAccess {
ttyOptions := createSerialOptions(conf)
tty, err = ttyOptions.Open(conf.SerialPort)
if err != nil {
fmt.Fprintf(os.Stderr, "cctz-ptz: unable to open tty: %s\n", conf.SerialPort)
os.Exit(1)
}
defer tty.Close()
printSerialPortInfo(conf, tty)
} else {
fmt.Fprintf(os.Stderr, "cctv-ptz: serial port disabled\n")
}
if "-" == conf.RecordFile {
record = os.Stdout
} else {
if record, err = os.Create(conf.RecordFile); err != nil {
panic(err)
}
}
defer record.Close()
// limit rate at which Pelco address may change via joystick
allowAddressChange := make(chan struct{}, 1)
allowAddressChange <- struct{}{} // prime channel to allow first address change
startTime := time.Now()
lastMessage := PelcoDMessage{}
for {
select {
case <-stdinObserver:
return
case state := <-jsObserver:
// adjust Pelco address
if isPressed(state, ptz.DecPelcoAddr) {
limitChange(allowAddressChange, func() { conf.Address -= 1 })
} else if isPressed(state, ptz.IncPelcoAddr) {
limitChange(allowAddressChange, func() { conf.Address += 1 })
}
// reset the clock if user presses Back
if isPressed(state, ptz.ResetTimer) {
resetTimer = true
}
if isMarkTriggered(state, ptz.MarkLeft) {
fmt.Fprintf(record, "# Mark Left\n")
}
if isMarkTriggered(state, ptz.MarkRight) {
fmt.Fprintf(record, "# Mark Right\n")
}
message := pelcoCreate()
message = pelcoTo(message, conf.Address)
message = joystickToPelco(message, state, conf.MaxSpeed)
message = pelcoChecksum(message)
if lastMessage != message {
var millis int64
if resetTimer {
millis = 0
resetTimer = false
startTime = time.Now()
} else {
endTime := time.Now()
millis = (endTime.Sub(startTime)).Nanoseconds() / 1E6
startTime = endTime
}
if conf.Verbose {
fmt.Printf("pelco-d %x %d\n", message, millis)
} else {
fmt.Fprintf(os.Stderr, "\033[Kpelco-d %x %d\r", message, millis)
}
fmt.Fprintf(record, "pelco-d %x %d\n", message, millis)
				if serialEnabled && tty != nil {
tty.Write(message[:])
}
lastMessage = message
}
}
}
}
func isPressed(state joystick.State, mask uint32) bool {
return 0 != state.Buttons&mask
}
func joystickToPelco(buffer PelcoDMessage, state joystick.State, maxSpeed int32) PelcoDMessage {
var zoom float32
panX := normalizeAxis(state, ptz.PanX)
panY := normalizeAxis(state, ptz.PanY)
openIris := isPressed(state, ptz.OpenIris)
closeIris := isPressed(state, ptz.CloseIris)
openMenu := isPressed(state, ptz.OpenMenu)
if isPressed(state, ptz.ZoomOut) {
zoom = -1.0
} else if isPressed(state, ptz.ZoomIn) {
zoom = 1.0
}
buffer = pelcoApplyJoystick(buffer, panX, panY, zoom, openIris, closeIris, openMenu, maxSpeed)
return buffer
}
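// limitChange invokes proc only when the rate-limit token is available, then re-arms
// the token after 125 ms so repeated button presses change the address gradually.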
func limitChange(allowAddressChange chan struct{}, proc func()) {
select {
case <-allowAddressChange:
proc()
// delay next signal to allow change
go func() {
<-time.After(125 * time.Millisecond)
allowAddressChange <- struct{}{}
}()
default:
// do nothing
}
}
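// listenFile streams lines from f over a channel; the stream ends on EOF or the first empty line.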
func listenFile(f io.Reader) <-chan []byte {
io := make(chan []byte)
scanner := bufio.NewScanner(f)
go func() {
defer close(io)
for scanner.Scan() {
bytes := scanner.Bytes()
if len(bytes) == 0 {
break
}
io <- bytes
}
if err := scanner.Err(); err != nil {
panic(err)
}
}()
return io
}
func listenJoystick(js joystick.Joystick, ticker *time.Ticker) <-chan joystick.State {
io := make(chan joystick.State, 20)
go func() {
for range ticker.C {
if state, err := js.Read(); err != nil {
panic(err)
} else {
io <- state
}
time.Sleep(25 * time.Millisecond)
}
}()
return io
}
func listenNothing() <-chan joystick.State {
return make(chan joystick.State)
}
func isMarkTriggered(state joystick.State, axis Axis) bool {
triggerValue := normalizeAxis(state, axis)
return 0.5 < triggerValue
}
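// normalizeAxis maps a raw axis reading to the range -1.0..1.0, zeroing values inside
// the deadzone and rescaling the remainder of the axis range.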
func normalizeAxis(state joystick.State, axis Axis) float32 {
var (
value = float32(state.AxisData[axis.Index])
deadzone = float32(axis.Deadzone)
max = float32(axis.Max)
)
if axis.Inverted {
value = -value
}
if value > 0 && value < deadzone {
value = 0
} else if value > deadzone {
value = (value - deadzone) / (max - deadzone)
} else if value < 0 && value > -deadzone {
value = 0
} else if value < -deadzone {
value = (value + deadzone) / (max - deadzone)
}
return value
}
func pelcoCreate() PelcoDMessage {
buffer := PelcoDMessage{}
buffer[SYNC] = 0xff
return buffer
}
// should be last call before sending a pelco message
func pelcoChecksum(buffer PelcoDMessage) PelcoDMessage {
buffer[CHECKSUM] = uint8(buffer[ADDR] + buffer[COMMAND_1] + buffer[COMMAND_2] + buffer[DATA_1] + buffer[DATA_2])
return buffer
}
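// Example frame: address 1, pan right at speed 0x3f → ff 01 00 02 3f 00 42,
// where the final byte is the checksum (0x01+0x00+0x02+0x3f+0x00 = 0x42).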
func pelcoTo(buffer PelcoDMessage, addr int) PelcoDMessage {
buffer[ADDR] = uint8(addr)
return buffer
}
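// pelcoApplyJoystick encodes joystick state into Pelco-D command bits: COMMAND_2 bits 1/2
// select pan right/left, bits 3/4 tilt up/down, bits 5/6 zoom in/out, while DATA_1 and
// DATA_2 carry the pan and tilt speeds scaled by maxSpeed. Iris open/close use COMMAND_1
// bits 1/2, and openMenu short-circuits everything with the open-menu command (0x03, data 0x5F).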
func pelcoApplyJoystick(buffer PelcoDMessage, panX, panY, zoom float32, openIris, closeIris, openMenu bool, maxSpeed int32) PelcoDMessage {
if openMenu {
buffer[COMMAND_1] = 0x00
buffer[COMMAND_2] = 0x03
buffer[DATA_1] = 0x00
buffer[DATA_2] = 0x5F
return buffer
}
if panX > 0 {
buffer[COMMAND_2] |= 1 << 1
} else if panX < 0 {
buffer[COMMAND_2] |= 1 << 2
}
// pan speed
buffer[DATA_1] = uint8(float64(maxSpeed) * math.Abs(float64(panX)))
if panY > 0 {
buffer[COMMAND_2] |= 1 << 3
} else if panY < 0 {
buffer[COMMAND_2] |= 1 << 4
}
// tilt speed
buffer[DATA_2] = uint8(float64(maxSpeed) * math.Abs(float64(panY)))
if zoom > 0 {
buffer[COMMAND_2] |= 1 << 5
} else if zoom < 0 {
buffer[COMMAND_2] |= 1 << 6
}
if openIris {
buffer[COMMAND_1] |= 1 << 1
} else if closeIris {
buffer[COMMAND_1] |= 1 << 2
}
return buffer
}
func playback(conf config.Config) {
var (
message PelcoDMessage
tty *serial.Port
millis uint64
err error
serialEnabled = ("/dev/null" != conf.SerialPort)
hasSerialAccess bool
)
hasSerialAccess, err = serialPortAvailable(conf.SerialPort)
if err != nil {
fmt.Fprintf(os.Stderr, "cctv-ptz: cannot open serial port (%s). %s\n", conf.SerialPort, err)
}
if serialEnabled && hasSerialAccess {
ttyOptions := createSerialOptions(conf)
tty, err = ttyOptions.Open(conf.SerialPort)
if err != nil {
panic(err)
}
defer tty.Close()
printSerialPortInfo(conf, tty)
} else {
fmt.Fprintf(os.Stderr, "Serial port disabled\n")
}
messageChannel := make(chan DelayedMessage)
defer close(messageChannel)
go sendDelayedMessages(messageChannel, tty, conf.Verbose)
lineCount := 0
lineScanner := bufio.NewScanner(os.Stdin)
for lineScanner.Scan() {
text := strings.TrimSpace(lineScanner.Text())
if strings.HasPrefix(text, "#") {
continue
}
words := strings.Fields(text)
lineCount += 1
if 3 > len(words) {
fmt.Fprintf(os.Stderr, "cctv-ptz: error parsing playback. Too few fields. Line %d: %s\n", lineCount, text)
continue
}
if "pelco-d" != words[0] {
fmt.Fprintf(os.Stderr, "cctv-ptz: error parsing playback. Invalid protocol %s. Line %d: %s\n", words[0], lineCount, text)
continue
}
if message, err = decodeMessage(words[1]); err != nil {
fmt.Fprintf(os.Stderr, "cctv-ptz: error parsing playback. Invalid packet %s. Line %d: %s\n", err.Error(), lineCount, text)
continue
}
if millis, err = strconv.ParseUint(words[2], 10, 64); err != nil {
fmt.Fprintf(os.Stderr, "cctv-ptz: error parsing playback. Invalid duration %s. Line %d: %s\n", err.Error(), lineCount, text)
continue
}
messageChannel <- DelayedMessage{message, time.Duration(millis) * time.Millisecond}
if conf.Verbose {
fmt.Fprintf(os.Stderr, "%s\n", text)
}
}
}
func printSerialPortInfo(conf config.Config, tty *serial.Port) {
baud, err := tty.BitRate()
if err != nil {
panic(err)
}
data, err := tty.DataBits()
if err != nil {
panic(err)
}
stop, err := tty.StopBits()
if err != nil {
panic(err)
}
parity, err := tty.Parity()
if err != nil {
panic(err)
}
fmt.Fprintf(os.Stderr, "Serial port opened. %s\n", conf.SerialPort)
fmt.Fprintf(os.Stderr, " Name: %s\n", tty.Name())
fmt.Fprintf(os.Stderr, " Baud rate: %d\n", baud)
fmt.Fprintf(os.Stderr, " Data bits: %d\n", data)<|fim▁hole|>func sendMessage(tty *serial.Port, message PelcoDMessage) {
if nil != tty {
tty.Write(message[:])
}
}
func sendDelayedMessages(c <-chan DelayedMessage, tty *serial.Port, verbose bool) {
var (
pkg DelayedMessage
lastTime time.Time
)
// send first message without delay
pkg = <-c
sendMessage(tty, pkg.Message)
lastTime = time.Now()
	// all other messages are delayed relative to the preceding message
for pkg = range c {
time.Sleep(pkg.Delay)
sendMessage(tty, pkg.Message)
if verbose {
duration := time.Now().Sub(lastTime) / 1E6
delay := pkg.Delay / 1E6
fmt.Fprintf(os.Stderr, "Sent %x after %d millis. target %d millis. offset %d millis\n",
pkg.Message, duration, delay, duration-delay)
}
lastTime = time.Now()
}
}
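// serialPortAvailable reports whether the effective user appears to have read/write access
// to the device node, checking the owner, other, and group permission bits in turn.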
func serialPortAvailable(serialPort string) (bool, error) {
var err error
goStat, err := os.Stat(serialPort)
if os.IsNotExist(err) || os.IsPermission(err) {
return false, err
}
euid := uint32(os.Geteuid())
unixStat, ok := goStat.Sys().(*syscall.Stat_t)
if !ok {
return false, errors.New("cannot determine file ownership or permissions")
}
if euid == unixStat.Uid && 0 != (0x600&unixStat.Mode) {
// we should have owner access!
return true, nil
}
if 0 != (0x006 & unixStat.Mode) {
// we should have other access!
return true, nil
}
if 0 != (0x060 & unixStat.Mode) {
groups, err := os.Getgroups()
if err != nil {
return false, err
}
// does any group for user match file's group?
for _, gid := range groups {
if uint32(gid) == unixStat.Gid {
// we should have group access!
return true, nil
}
}
}
	return false, fmt.Errorf("access denied. uid (%d) gid (%d) mode (%o)", unixStat.Uid, unixStat.Gid, 0xfff&unixStat.Mode)
}
func version() string {
return fmt.Sprintf("%s: version %s, build %s\n\n", os.Args[0], VERSION, BUILD_DATE)
}<|fim▁end|> | fmt.Fprintf(os.Stderr, " Stop bits: %d\n", stop)
fmt.Fprintf(os.Stderr, " Parity: %d\n", parity)
}
|
<|file_name|>hasManyThrough.js<|end_file_name|><|fim▁begin|>var assert = require('assert');
var _ = require('@sailshq/lodash');
var SchemaBuilder = require('../lib/waterline-schema');
describe('Has Many Through :: ', function() {
describe('Junction Tables', function() {
var schema;
before(function() {
var fixtures = [
{
identity: 'user',
primaryKey: 'id',
attributes: {
id: {
type: 'number'
},
cars: {
collection: 'car',
through: 'drive',
via: 'user'
}
}<|fim▁hole|>
{
identity: 'drive',
primaryKey: 'id',
attributes: {
id: {
type: 'number'
},
car: {
model: 'car'
},
user: {
model: 'user'
}
}
},
{
identity: 'car',
primaryKey: 'id',
attributes: {
id: {
type: 'number'
},
drivers: {
collection: 'user',
through: 'drive',
via: 'car'
}
}
}
];
var collections = _.map(fixtures, function(obj) {
var collection = function() {};
collection.prototype = obj;
return collection;
});
// Build the schema
schema = SchemaBuilder(collections);
});
it('should flag the "through" table and not mark it as a junction table', function() {
assert(schema.drive);
assert(!schema.drive.junctionTable);
assert(schema.drive.throughTable);
});
});
describe('Reference Mapping', function() {
var schema;
before(function() {
var fixtures = [
{
identity: 'foo',
primaryKey: 'id',
attributes: {
id: {
type: 'number'
},
bars: {
collection: 'bar',
through: 'foobar',
via: 'foo'
}
}
},
{
identity: 'foobar',
primaryKey: 'id',
attributes: {
id: {
type: 'number'
},
type: {
type: 'string'
},
foo: {
model: 'foo',
columnName: 'foo_id'
},
bar: {
model: 'bar',
columnName: 'bar_id'
}
}
},
{
identity: 'bar',
primaryKey: 'id',
attributes: {
id: {
type: 'number'
},
foo: {
collection: 'foo',
through: 'foobar',
via: 'bar'
}
}
}
];
var collections = _.map(fixtures, function(obj) {
var collection = function() {};
collection.prototype = obj;
return collection;
});
// Build the schema
schema = SchemaBuilder(collections);
});
it('should update the parent collection to point to the join table', function() {
assert.equal(schema.foo.schema.bars.references, 'foobar');
assert.equal(schema.foo.schema.bars.on, 'foo_id');
});
});
});<|fim▁end|> | }, |
<|file_name|>inverse.rs<|end_file_name|><|fim▁begin|>use num::{Zero, One, Signed};
use bigint::{BigUint, BigInt, Sign};
pub trait Inverse {
type Output;
fn inverse(self, modulo: Self) -> Option<Self::Output>;
}
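// The unsigned implementation delegates to the signed BigInt algorithm and converts the result back.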
impl<'a> Inverse for &'a BigUint {
type Output = BigUint;<|fim▁hole|>
fn inverse(self, modulo: Self) -> Option<Self::Output> {
BigInt::from_biguint(Sign::Plus, self.clone())
.inverse(&BigInt::from_biguint(Sign::Plus, modulo.clone()))
.and_then(|n| n.to_biguint())
}
}
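// Modular inverse via the extended Euclidean algorithm; returns None when self and modulo
// are not coprime, and normalizes a negative coefficient into the range 0..modulo.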
impl<'a> Inverse for &'a BigInt {
type Output = BigInt;
fn inverse(self, modulo: Self) -> Option<Self::Output> {
let (mut t, mut new_t): (BigInt, BigInt) = (Zero::zero(), One::one());
let (mut r, mut new_r): (BigInt, BigInt) = (modulo.clone(), self.clone());
while !new_r.is_zero() {
let quo = &r / &new_r;
let tmp = &r - &quo * &new_r;
r = new_r;
new_r = tmp;
let tmp = &t - &quo * &new_t;
t = new_t;
new_t = tmp;
}
if r != One::one() {
return None;
}
if t.is_negative() {
Some(t + modulo)
} else {
Some(t)
}
}
}<|fim▁end|> | |
<|file_name|>wrap_unhygienic_example.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-pretty pretty-printing is unhygienic
// aux-build:my_crate.rs
// aux-build:unhygienic_example.rs
#![feature(decl_macro)]
extern crate unhygienic_example;
extern crate my_crate; // (b)
// Hygienic version of `unhygienic_macro`.
pub macro hygienic_macro() {
fn g() {} // (c)
::unhygienic_example::unhygienic_macro!();
// ^ Even though we invoke an unhygienic macro, `hygienic_macro` remains hygienic.
// In the above expansion:
// (1) `my_crate` always resolves to (b) regardless of invocation site.
// (2) The defined function `f` is only usable inside this macro definition.
// (3) `g` always resolves to (c) regardless of invocation site.
// (4) `$crate::g` remains hygienic and continues to resolve to (a).
f();
}
#[allow(unused)]
fn test_hygienic_macro() {
hygienic_macro!();
fn f() {} // (d) no conflict
f(); // resolves to (d)
}
fn main() {}<|fim▁end|> | // |
<|file_name|>KudoEdit.py<|end_file_name|><|fim▁begin|>import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import *
import os
class Window(QtGui.QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.filename = None
self.initUI()
def initUI(self):
self.italic_flag = False
self.underline_flag = False
self.path = os.path.abspath(__file__)
self.icon_path = "/".join(self.path.split("/")[:-1]+["icons"])
self.exitclick = self.add_action("Exit", "Ctrl+Q",
"/".join([self.icon_path,"exit_icon.png"]),
qApp.quit)
self.newclick = self.add_action("New", "Ctrl+N",
"/".join([self.icon_path,"new_icon.png"]),
self.newfile)
self.openclick = self.add_action("Open", "Ctrl+O",
"/".join([self.icon_path,"open_icon.png"]),
self.openfile)
self.saveclick = self.add_action("Save", "Ctrl+S",
"/".join([self.icon_path,"save_icon.png"]),
self.savefile)
self.saveasclick = self.add_action("SaveAs", "Ctrl+Shift+S",
"/".join([self.icon_path,"save_as_icon.gif"]),
self.save_asfile)
self.copyclick = self.add_action("Copy", "Ctrl+C",
"/".join([self.icon_path,"copy_icon.png"]),
self.copy)
self.pasteclick = self.add_action("Paste", "Ctrl+V",
"/".join([self.icon_path,"paste_icon.jpg"]),
self.paste)
#self.printclick = self.add_action("Print", "Ctrl+P",
# "/".join([self.icon_path,"print_icon.jpg"]),
# self.printclick)
        self.close_tab_click = self.add_action("Close", "Ctrl+W",
                                                "",  # no toolbar icon for this action
                                                self.close_tab)
self.italicclick = self.add_action("Italic", "Ctrl+I",
"/".join([self.icon_path,"italic_icon.png"]),
self.italic)
self.boldclick = self.add_action("Bold", "Ctrl+B",
"/".join([self.icon_path,"bold_icon.png"]),
self.bold)
self.underlineclick = self.add_action("Underline", "Ctrl+U",
"/".join([self.icon_path,"underline_icon.png"]),
self.underline)
tab = QTextEdit()
self.tab_widget = QTabWidget()
self.tab_widget.tabsClosable()
textEditf = QFont()
layout = QVBoxLayout(tab)
QtCore.QObject.connect(self.tab_widget,
QtCore.SIGNAL('tabCloseRequested(int)'),
self.close_tab)
self.setCentralWidget(self.tab_widget)
self.statusBar()
self.toolbar = self.addToolBar('New')
self.toolbar.addAction(self.newclick)
self.toolbar.addAction(self.saveclick)
self.toolbar.addAction(self.saveasclick)
self.toolbar.addAction(self.openclick)
self.toolbar.addAction(self.exitclick)
self.toolbar.addAction(self.copyclick)
self.toolbar.addAction(self.pasteclick)
self.toolbar.addAction(self.boldclick)
self.toolbar.addAction(self.italicclick)
self.toolbar.addAction(self.underlineclick)
menubar = self.menuBar()
fileMenu = menubar.addMenu('File')
fileMenu.addAction(self.newclick)
fileMenu.addAction(self.openclick)
fileMenu.addAction(self.saveclick)
fileMenu.addAction(self.saveasclick)
fileMenu.addAction(self.close_tab_click)
#fileMenu.addAction(printclick)
fileMenu.addAction(self.exitclick)
editMenu = menubar.addMenu('Edit')
editMenu.addAction(self.copyclick)
editMenu.addAction(self.pasteclick)
viewMenu = menubar.addMenu('View')
viewMenu.addAction(self.italicclick)
viewMenu.addAction(self.boldclick)
viewMenu.addAction(self.underlineclick)
self.showMaximized()
self.show()
def add_action(self, action_name, shortcut=None, icon_path=None, trigger_action=None ):
action = QAction(QIcon(icon_path), action_name, self)
action.setShortcut(shortcut)
action.setStatusTip(action_name)
action.triggered.connect(trigger_action)
return action
def keyReleaseEvent(self, e):
tab_index = self.tab_widget.currentIndex()
tabText = self.tab_widget.tabText(tab_index)
self.tab_widget.tabBar().setTabTextColor(tab_index,
QColor(255,0,0))
if tab_index < 0:
return
if tabText != "untitled*" and tabText[-1] != "*":
tabText = tabText+"*"
self.tab_widget.setTabText(tab_index,tabText)
def close_tab(self):
print "closing tab"
tab_index = self.tab_widget.currentIndex()
if tab_index < 0:
qApp.quit()
return
tabText = self.tab_widget.tabText(tab_index)
if tabText[-1] == "*":
msgBox = QMessageBox()
msgBox.setText("The document has been modified.")
msgBox.setInformativeText("Do you want to save your changes?")
msgBox.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)
msgBox.setDefaultButton(QMessageBox.Save)
ret = msgBox.exec_()
if ret == QMessageBox.Save:
self.savefile()
self.close_tab()
elif ret == QMessageBox.Discard:
pass
elif ret == QMessageBox.Cancel:
return
self.tab_widget.removeTab(tab_index)
"""
def printfile(self):
#print_cmd = 'lp -d NetPrinter filename'
text=self.textEdit.toPlainText()
os.popen(str(text))
#self.textEdit.print_(os.printer)
"""
def italic(self):
italic_button = self.toolbar.widgetForAction(self.italicclick)
italic_icon = QIcon("/".join([self.icon_path,"italic_icon.png"]))
print self.italic_flag
if not self.italic_flag:
new_pixmap = italic_icon.pixmap(QtCore.QSize(20,20),QIcon.Disabled,QIcon.On)
else:
new_pixmap = italic_icon.pixmap(QtCore.QSize(20,20),QIcon.Active, QIcon.On)
new_icon = QIcon(new_pixmap)
italic_button.setIcon(new_icon)
tab_index = self.tab_widget.currentIndex()
textEdit = self.tab_widget.widget(tab_index)
if not textEdit:
return
textEdit.setFontItalic(not self.italic_flag)
self.italic_flag = not self.italic_flag
def bold(self):
bold_button = self.toolbar.widgetForAction(self.boldclick)
bold_icon = QIcon("/".join([self.icon_path,"bold_icon.png"]))
tab_index = self.tab_widget.currentIndex()
textEdit = self.tab_widget.widget(tab_index)
if not textEdit:
return
font_weight = textEdit.fontWeight()
if font_weight == 50:
new_pixmap = bold_icon.pixmap(QtCore.QSize(20,20),QIcon.Disabled,QIcon.On)
font_weight = 75
textEdit.setFontWeight(font_weight)
else:
new_pixmap = bold_icon.pixmap(QtCore.QSize(20,20),QIcon.Active, QIcon.On)
font_weight = 50
textEdit.setFontWeight(font_weight)
new_icon = QIcon(new_pixmap)
bold_button.setIcon(new_icon)
def underline(self):
tab_index = self.tab_widget.currentIndex()
textEdit = self.tab_widget.widget(tab_index)
if not textEdit:
return
if not self.underline_flag:
status = QIcon.Disabled
else:
status = QIcon.Active
textEdit.setFontUnderline(not self.underline_flag)
button = self.toolbar.widgetForAction(self.underlineclick)
icon = QIcon("/".join([self.icon_path,"underline_icon.png"]))
new_pixmap = icon.pixmap(QtCore.QSize(20,20),status,QIcon.On)
new_icon = QIcon(new_pixmap)
button.setIcon(new_icon)
self.underline_flag = not self.underline_flag
def copy(self):
tab_index = self.tab_widget.currentIndex()
if tab_index < 0:
return
textEdit = self.tab_widget.widget(tab_index)
textEdit.copy()
def paste(self):
tab_index = self.tab_widget.currentIndex()
if tab_index < 0:
return
textEdit = self.tab_widget.widget(tab_index)
textEdit.paste()
def savefile(self):
tab_index = self.tab_widget.currentIndex()
if tab_index < 0:
return
textEdit = self.tab_widget.widget(tab_index)
filename = self.tab_widget.tabText(tab_index)
if filename == "untitled*":
self.save_asfile()<|fim▁hole|> return
if filename[-1] == "*":
filename = filename[:-1]
f=open(filename, 'w')
f.write(textEdit.toPlainText())
f.close()
self.tab_widget.setTabText(tab_index,filename)
self.tab_widget.tabBar().setTabTextColor(tab_index, QColor(0,0,0))
def save_asfile(self):
tab_index = self.tab_widget.currentIndex()
if tab_index < 0:
return
textEdit = self.tab_widget.widget(tab_index)
filename = QFileDialog.getSaveFileName(self,"Save File",os.getcwd())
print filename
f=open(filename, 'w')
f.write(textEdit.toPlainText())
f.close()
self.tab_widget.tabBar().setTabTextColor(tab_index, QColor(0,0,0))
self.tab_widget.setTabText(tab_index,filename.split("/")[-1])
def openfile(self):
filename = QFileDialog.getOpenFileName(self,"Open File",os.getcwd())
print filename
f=open(filename, 'r')
text=f.read()
f.close()
textEdit = QTextEdit()
textEdit.setText(text)
self.tab_widget.addTab(textEdit,filename.split("/")[-1])
tab_count = self.tab_widget.count()
tabbar = self.tab_widget.tabBar()
close_tab_click = QAction(QIcon("/".join([self.icon_path,"dialog-close.svg"])),"",self)
close_tab_click.triggered.connect(self.close_tab)
but = QToolButton()
but.setDefaultAction(close_tab_click)
self.tab_widget.tabBar().setTabButton(tab_count-1,QTabBar.RightSide,but)
self.tab_widget.tabsClosable()
self.show()
def newfile(self):
tab = QTextEdit()
layout = QVBoxLayout(tab)
self.tab_widget.addTab(tab,"untitled*")
tab_count = self.tab_widget.count()
tabbar = self.tab_widget.tabBar()
close_tab_click = QAction(QIcon("/".join([self.icon_path,"dialog-close.svg"])),"",self)
close_tab_click.triggered.connect(self.close_tab)
but = QToolButton()
but.setDefaultAction(close_tab_click)
self.tab_widget.tabBar().setTabButton(tab_count-1,QTabBar.RightSide,but)
self.tab_widget.tabsClosable()
self.show()
def closeEvent(self, event):
reply = QMessageBox.question(self,
'Message',
"Are you sure you want to quit?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def KudoEdit():
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec_())
#KudoEdit()<|fim▁end|> | |
<|file_name|>personal.rs<|end_file_name|><|fim▁begin|>// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::str::FromStr;
use jsonrpc_core::{IoHandler, GenericIoHandler};
use util::{U256, Uint, Address};
use ethcore::account_provider::AccountProvider;
use v1::{PersonalClient, Personal};
use v1::tests::helpers::TestMinerService;
use ethcore::client::TestBlockChainClient;
use ethcore::transaction::{Action, Transaction};
struct PersonalTester {
accounts: Arc<AccountProvider>,
io: IoHandler,
miner: Arc<TestMinerService>,
// these unused fields are necessary to keep the data alive
// as the handler has only weak pointers.
_client: Arc<TestBlockChainClient>,
}
fn blockchain_client() -> Arc<TestBlockChainClient> {
let client = TestBlockChainClient::new();
Arc::new(client)
}
fn accounts_provider() -> Arc<AccountProvider> {
Arc::new(AccountProvider::transient_provider())
}
fn miner_service() -> Arc<TestMinerService> {
Arc::new(TestMinerService::default())
}
fn setup() -> PersonalTester {
let accounts = accounts_provider();
let client = blockchain_client();
let miner = miner_service();
let personal = PersonalClient::new(&accounts, &client, &miner, false);
let io = IoHandler::new();
io.add_delegate(personal.to_delegate());
let tester = PersonalTester {
accounts: accounts,
io: io,
miner: miner,
_client: client,
};
tester
}
#[test]
fn accounts() {
let tester = setup();
let address = tester.accounts.new_account("").unwrap();
let request = r#"{"jsonrpc": "2.0", "method": "personal_listAccounts", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":[""#.to_owned() + &format!("0x{:?}", address) + r#""],"id":1}"#;
assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned()));
}
#[test]
fn new_account() {
let tester = setup();
let request = r#"{"jsonrpc": "2.0", "method": "personal_newAccount", "params": ["pass"], "id": 1}"#;
let res = tester.io.handle_request_sync(request);
let accounts = tester.accounts.accounts().unwrap();
assert_eq!(accounts.len(), 1);
let address = accounts[0];
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"","id":1}"#;
assert_eq!(res, Some(response));
}
#[test]
fn sign_and_send_transaction_with_invalid_password() {
let tester = setup();
let address = tester.accounts.new_account("password123").unwrap();
let request = r#"{
"jsonrpc": "2.0",
"method": "personal_signAndSendTransaction",
"params": [{
"from": ""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"",
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
"gas": "0x76c0",
"gasPrice": "0x9184e72a000",
"value": "0x9184e72a"
}, "password321"],
"id": 1<|fim▁hole|>
let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidPassword)"},"id":1}"#;
assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response.into()));
}
#[test]
fn sign_and_send_transaction() {
let tester = setup();
let address = tester.accounts.new_account("password123").unwrap();
let request = r#"{
"jsonrpc": "2.0",
"method": "personal_signAndSendTransaction",
"params": [{
"from": ""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"",
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
"gas": "0x76c0",
"gasPrice": "0x9184e72a000",
"value": "0x9184e72a"
}, "password123"],
"id": 1
}"#;
let t = Transaction {
nonce: U256::zero(),
gas_price: U256::from(0x9184e72a000u64),
gas: U256::from(0x76c0),
action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
value: U256::from(0x9184e72au64),
data: vec![]
};
tester.accounts.unlock_account_temporarily(address, "password123".into()).unwrap();
let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap();
let t = t.with_signature(signature, None);
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", t.hash()).as_ref() + r#"","id":1}"#;
assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response));
tester.miner.last_nonces.write().insert(address.clone(), U256::zero());
let t = Transaction {
nonce: U256::one(),
gas_price: U256::from(0x9184e72a000u64),
gas: U256::from(0x76c0),
action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
value: U256::from(0x9184e72au64),
data: vec![]
};
tester.accounts.unlock_account_temporarily(address, "password123".into()).unwrap();
let signature = tester.accounts.sign(address, None, t.hash(None)).unwrap();
let t = t.with_signature(signature, None);
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", t.hash()).as_ref() + r#"","id":1}"#;
assert_eq!(tester.io.handle_request_sync(request.as_ref()), Some(response));
}
#[test]
fn should_unlock_account_temporarily() {
let tester = setup();
let address = tester.accounts.new_account("password123").unwrap();
let request = r#"{
"jsonrpc": "2.0",
"method": "personal_unlockAccount",
"params": [
""#.to_owned() + &format!("0x{:?}", address) + r#"",
"password123",
"0x100"
],
"id": 1
}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(tester.io.handle_request_sync(&request), Some(response.into()));
assert!(tester.accounts.sign(address, None, Default::default()).is_ok(), "Should unlock account.");
}
#[test]
fn should_unlock_account_permanently() {
let tester = setup();
let address = tester.accounts.new_account("password123").unwrap();
let request = r#"{
"jsonrpc": "2.0",
"method": "personal_unlockAccount",
"params": [
""#.to_owned() + &format!("0x{:?}", address) + r#"",
"password123",
null
],
"id": 1
}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(tester.io.handle_request_sync(&request), Some(response.into()));
assert!(tester.accounts.sign(address, None, Default::default()).is_ok(), "Should unlock account.");
}<|fim▁end|> | }"#; |