ext | sha | content
---|---|---|
py | b40b9d3884901ae424c2278cdda19d9149731cba | from __future__ import unicode_literals
import os,json,sys
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import youtube_dl
ydl_opts = {}
_url_ = ''  # provide the URL of the video to inspect
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    meta = ydl.extract_info(_url_, download=False)  # download=False: fetch metadata only, do not download
_json=json.dumps(meta)
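# Illustrative use of the metadata dict (field names assumed from typical youtube_dl info dicts):
print(meta.get('title'), meta.get('duration'))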
|
py | b40b9d69deeb8bb97518acacf484e4be1d31f616 | import cartoframes
from carto.auth import APIKeyAuthClient
from carto.exceptions import CartoException
from carto.sql import BatchSQLClient, SQLClient
from ..common.query_response import LongitudeQueryResponse
from .base import DataSource, LongitudeQueryCannotBeExecutedException
class CartoDataSource(DataSource):
SUBDOMAIN_URL_PATTERN = "https://%s.carto.com"
ON_PREMISES_URL_PATTERN = "https://%s/user/%s"
DEFAULT_API_VERSION = 'v2'
    def __init__(self, user, api_key, options=None):
        # Avoid a mutable default argument; fall back to an empty options dict.
        options = options if options is not None else {}
        super().__init__(options)
self.do_post = options.get('do_post', False)
self.parse_json = options.get('parse_json', True)
self.format = options.get('format', 'json')
self.base_url_option = options.get('base_url', '')
self.api_version = options.get('api_version', self.DEFAULT_API_VERSION)
self.batch = options.get('batch', False)
self.user = user
self.api_key = api_key
self.base_url = self._generate_base_url(user, self.base_url_option)
# Carto Context for DataFrame handling
self._carto_context = None
# Carto client for COPYs
self._copy_client = None
self._auth_client = APIKeyAuthClient(api_key=api_key, base_url=self.base_url)
self._sql_client = SQLClient(self._auth_client, api_version=self.api_version)
self._batch_client = None
if self.batch:
self._batch_client = BatchSQLClient(self._auth_client)
@property
def cc(self):
"""
Creates and returns a CartoContext object to work with Panda Dataframes
:return:
"""
        # TODO: The CartoContext documentation says that SSL must sometimes be disabled
        # when an on-premises host is used.
        # We are not taking this into account. It would require creating a requests.Session()
        # object, disabling SSL verification on it and passing it to the CartoContext init.
if self._carto_context is None:
self._carto_context = cartoframes.CartoContext(
base_url=self.base_url, api_key=self.api_key
)
return self._carto_context
def _generate_base_url(self, user, base_url_option):
if base_url_option:
base_url = self.ON_PREMISES_URL_PATTERN % (base_url_option, user)
else:
base_url = self.SUBDOMAIN_URL_PATTERN % user
return base_url
def execute_query(self, query_template, params, query_config, **opts):
        # TODO: Here we are parsing the parameters and taking responsibility for it. We do not do
        # any safe parsing as this will be used in a backend-to-backend context and we build our
        # own queries.
        # ---
        # This is also problematic as quoting is not done and relies on the query template
# ---
# Can we use the .mogrify method in psycopg2 to render a query as it is going to be
# executed ? -> NO
# -> .mogrify is a cursor method but in CARTO connections we lack a cursor.
# ---
# There is an open issue in CARTO about having separated parameters and binding them in
# the server:
# https://github.com/CartoDB/Geographica-Product-Coordination/issues/57
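        # Illustrative example (hypothetical template): with
        # query_template = "SELECT * FROM t WHERE name = %(name)s" and
        # params = {'name': 'Bob'}, the code below renders
        # "SELECT * FROM t WHERE name = 'Bob'".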
params = {k: "'" + v + "'" for k, v in params.items()}
formatted_query = query_template % params
try:
return self._sql_client.send(
formatted_query,
parse_json=self.parse_json,
do_post=self.do_post,
format=self.format
)
except CartoException as e:
raise LongitudeQueryCannotBeExecutedException(str(e))
def parse_response(self, response):
return LongitudeQueryResponse(
rows=response['rows'],
fields=response['fields'],
meta={
'response_time': response.get('time'),
'total_rows': response.get('total_rows')
}
)
def copy_from(self, data, filepath, to_table):
if self._copy_client is None:
from carto.sql import CopySQLClient
self._copy_client = CopySQLClient(self._auth_client)
headers = data.readline().decode('utf-8')
data.seek(0)
from_query = 'COPY %s (%s) FROM stdin WITH (FORMAT csv, HEADER true)' % (to_table, headers)
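        # e.g. (hypothetical CSV) if the header line of `data` is "id,name", this builds a
        # statement like: COPY <to_table> (id,name) FROM stdin WITH (FORMAT csv, HEADER true)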
return self._copy_client.copyfrom_file_object(from_query, data)
def read_dataframe(self, table_name='', *args, **kwargs):
return self.cc.read(table_name=table_name, *args, **kwargs)
def query_dataframe(self, query='', *args, **kwargs):
return self.cc.query(query=query, *args, **kwargs)
def write_dataframe(self, df, table_name='', *args, **kwargs):
return self.cc.write(df=df, table_name=table_name, *args, **kwargs)
|
py | b40b9dbd2657c14968ce89d9c553f5419aa20e41 | """
Algorithm for calculating the most cost-efficient sequence for converting one string
into another.
The only allowed operations are
--- Cost to copy a character is copy_cost
--- Cost to replace a character is replace_cost
--- Cost to delete a character is delete_cost
--- Cost to insert a character is insert_cost
"""
def compute_transform_tables(
source_string: str,
destination_string: str,
copy_cost: int,
replace_cost: int,
delete_cost: int,
insert_cost: int,
) -> tuple[list[list[int]], list[list[str]]]:
source_seq = list(source_string)
destination_seq = list(destination_string)
len_source_seq = len(source_seq)
len_destination_seq = len(destination_seq)
costs = [
[0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1)
]
ops = [
["0" for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1)
]
for i in range(1, len_source_seq + 1):
costs[i][0] = i * delete_cost
ops[i][0] = "D%c" % source_seq[i - 1]
for i in range(1, len_destination_seq + 1):
costs[0][i] = i * insert_cost
ops[0][i] = "I%c" % destination_seq[i - 1]
for i in range(1, len_source_seq + 1):
for j in range(1, len_destination_seq + 1):
if source_seq[i - 1] == destination_seq[j - 1]:
costs[i][j] = costs[i - 1][j - 1] + copy_cost
ops[i][j] = "C%c" % source_seq[i - 1]
else:
costs[i][j] = costs[i - 1][j - 1] + replace_cost
ops[i][j] = "R%c" % source_seq[i - 1] + str(destination_seq[j - 1])
if costs[i - 1][j] + delete_cost < costs[i][j]:
costs[i][j] = costs[i - 1][j] + delete_cost
ops[i][j] = "D%c" % source_seq[i - 1]
if costs[i][j - 1] + insert_cost < costs[i][j]:
costs[i][j] = costs[i][j - 1] + insert_cost
ops[i][j] = "I%c" % destination_seq[j - 1]
return costs, ops
def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]:
if i == 0 and j == 0:
return []
else:
if ops[i][j][0] == "C" or ops[i][j][0] == "R":
seq = assemble_transformation(ops, i - 1, j - 1)
seq.append(ops[i][j])
return seq
elif ops[i][j][0] == "D":
seq = assemble_transformation(ops, i - 1, j)
seq.append(ops[i][j])
return seq
else:
seq = assemble_transformation(ops, i, j - 1)
seq.append(ops[i][j])
return seq
if __name__ == "__main__":
_, operations = compute_transform_tables("Python", "Algorithms", -1, 1, 2, 2)
m = len(operations)
n = len(operations[0])
sequence = assemble_transformation(operations, m - 1, n - 1)
string = list("Python")
i = 0
cost = 0
with open("min_cost.txt", "w") as file:
for op in sequence:
print("".join(string))
if op[0] == "C":
file.write("%-16s" % "Copy %c" % op[1])
file.write("\t\t\t" + "".join(string))
file.write("\r\n")
cost -= 1
elif op[0] == "R":
string[i] = op[2]
file.write("%-16s" % ("Replace %c" % op[1] + " with " + str(op[2])))
file.write("\t\t" + "".join(string))
file.write("\r\n")
cost += 1
elif op[0] == "D":
string.pop(i)
file.write("%-16s" % "Delete %c" % op[1])
file.write("\t\t\t" + "".join(string))
file.write("\r\n")
cost += 2
else:
string.insert(i, op[1])
file.write("%-16s" % "Insert %c" % op[1])
file.write("\t\t\t" + "".join(string))
file.write("\r\n")
cost += 2
i += 1
print("".join(string))
print("Cost: ", cost)
file.write("\r\nMinimum cost: " + str(cost))
|
py | b40b9e26f90d13c944d37142b0deb81ed60ce130 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources import scheduler_hints as sh
from heat.engine.resources import volume_base as vb
from heat.engine import support
from heat.engine import translation
LOG = logging.getLogger(__name__)
class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
"""A resource that implements Cinder volumes.
    A Cinder volume is storage in the form of block devices. It can be used,
    for example, to provide storage to an instance. A volume can be created
    from a snapshot, a backup or an image, or by size alone.
"""
PROPERTIES = (
AVAILABILITY_ZONE, SIZE, SNAPSHOT_ID, BACKUP_ID, NAME,
DESCRIPTION, VOLUME_TYPE, METADATA, IMAGE_REF, IMAGE,
SOURCE_VOLID, CINDER_SCHEDULER_HINTS, READ_ONLY, MULTI_ATTACH,
) = (
'availability_zone', 'size', 'snapshot_id', 'backup_id', 'name',
'description', 'volume_type', 'metadata', 'imageRef', 'image',
'source_volid', 'scheduler_hints', 'read_only', 'multiattach',
)
ATTRIBUTES = (
AVAILABILITY_ZONE_ATTR, SIZE_ATTR, SNAPSHOT_ID_ATTR, DISPLAY_NAME_ATTR,
DISPLAY_DESCRIPTION_ATTR, VOLUME_TYPE_ATTR, METADATA_ATTR,
SOURCE_VOLID_ATTR, STATUS, CREATED_AT, BOOTABLE, METADATA_VALUES_ATTR,
ENCRYPTED_ATTR, ATTACHMENTS, ATTACHMENTS_LIST, MULTI_ATTACH_ATTR,
) = (
'availability_zone', 'size', 'snapshot_id', 'display_name',
'display_description', 'volume_type', 'metadata',
'source_volid', 'status', 'created_at', 'bootable', 'metadata_values',
'encrypted', 'attachments', 'attachments_list', 'multiattach',
)
properties_schema = {
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('The availability zone in which the volume will be created.')
),
SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the volume in GB. '
'On update only increase in size is supported. This property '
'is required unless property %(backup)s or %(vol)s or '
'%(snapshot)s is specified.')
% dict(backup=BACKUP_ID,
vol=SOURCE_VOLID,
snapshot=SNAPSHOT_ID),
update_allowed=True,
constraints=[
constraints.Range(min=1),
]
),
SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('If specified, the snapshot to create the volume from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BACKUP_ID: properties.Schema(
properties.Schema.STRING,
_('If specified, the backup to create the volume from.'),
update_allowed=True,
constraints=[
constraints.CustomConstraint('cinder.backup')
]
),
NAME: properties.Schema(
properties.Schema.STRING,
_('A name used to distinguish the volume.'),
update_allowed=True,
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('A description of the volume.'),
update_allowed=True,
),
VOLUME_TYPE: properties.Schema(
properties.Schema.STRING,
_('If specified, the type of volume to use, mapping to a '
'specific backend.'),
constraints=[
constraints.CustomConstraint('cinder.vtype')
],
update_allowed=True
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Key/value pairs to associate with the volume.'),
update_allowed=True,
default={}
),
IMAGE_REF: properties.Schema(
properties.Schema.STRING,
_('The ID of the image to create the volume from.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
message=_('Use property %s.') % IMAGE,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.1'
)
)
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('If specified, the name or ID of the image to create the '
'volume from.'),
constraints=[
constraints.CustomConstraint('glance.image')
]
),
SOURCE_VOLID: properties.Schema(
properties.Schema.STRING,
_('If specified, the volume to use as source.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
CINDER_SCHEDULER_HINTS: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key-value pairs specified by the client to help '
              'the Cinder scheduler when creating a volume.'),
support_status=support.SupportStatus(version='2015.1')
),
READ_ONLY: properties.Schema(
properties.Schema.BOOLEAN,
_('Enables or disables read-only access mode of volume.'),
support_status=support.SupportStatus(version='5.0.0'),
update_allowed=True,
),
MULTI_ATTACH: properties.Schema(
properties.Schema.BOOLEAN,
            _('Whether to allow the volume to be attached more than once.'),
support_status=support.SupportStatus(version='6.0.0'),
default=False
),
}
attributes_schema = {
AVAILABILITY_ZONE_ATTR: attributes.Schema(
_('The availability zone in which the volume is located.'),
type=attributes.Schema.STRING
),
SIZE_ATTR: attributes.Schema(
_('The size of the volume in GB.'),
type=attributes.Schema.STRING
),
SNAPSHOT_ID_ATTR: attributes.Schema(
_('The snapshot the volume was created from, if any.'),
type=attributes.Schema.STRING
),
DISPLAY_NAME_ATTR: attributes.Schema(
_('Name of the volume.'),
type=attributes.Schema.STRING
),
DISPLAY_DESCRIPTION_ATTR: attributes.Schema(
_('Description of the volume.'),
type=attributes.Schema.STRING
),
VOLUME_TYPE_ATTR: attributes.Schema(
_('The type of the volume mapping to a backend, if any.'),
type=attributes.Schema.STRING
),
METADATA_ATTR: attributes.Schema(
_('Key/value pairs associated with the volume.'),
type=attributes.Schema.STRING
),
SOURCE_VOLID_ATTR: attributes.Schema(
_('The volume used as source, if any.'),
type=attributes.Schema.STRING
),
STATUS: attributes.Schema(
_('The current status of the volume.'),
type=attributes.Schema.STRING
),
CREATED_AT: attributes.Schema(
_('The timestamp indicating volume creation.'),
type=attributes.Schema.STRING
),
BOOTABLE: attributes.Schema(
_('Boolean indicating if the volume can be booted or not.'),
type=attributes.Schema.STRING
),
METADATA_VALUES_ATTR: attributes.Schema(
_('Key/value pairs associated with the volume in raw dict form.'),
type=attributes.Schema.MAP
),
ENCRYPTED_ATTR: attributes.Schema(
_('Boolean indicating if the volume is encrypted or not.'),
type=attributes.Schema.STRING
),
ATTACHMENTS: attributes.Schema(
_('A string representation of the list of attachments of the '
'volume.'),
type=attributes.Schema.STRING,
support_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % ATTACHMENTS_LIST,
version='9.0.0',
previous_status=support.SupportStatus(
status=support.SUPPORTED,
version='2015.1'
)
)
),
ATTACHMENTS_LIST: attributes.Schema(
_('The list of attachments of the volume.'),
type=attributes.Schema.LIST,
support_status=support.SupportStatus(version='9.0.0'),
),
MULTI_ATTACH_ATTR: attributes.Schema(
            _('Boolean indicating whether to allow the volume to be attached '
              'more than once.'),
type=attributes.Schema.BOOLEAN,
support_status=support.SupportStatus(version='6.0.0'),
),
}
_volume_creating_status = ['creating', 'restoring-backup', 'downloading']
entity = 'volumes'
def translation_rules(self, props):
return [
translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
[self.IMAGE],
value_path=[self.IMAGE_REF]
)
]
def _name(self):
name = self.properties[self.NAME]
if name:
return name
return super(CinderVolume, self)._name()
def _description(self):
return self.properties[self.DESCRIPTION]
def _create_arguments(self):
arguments = {
'size': self.properties[self.SIZE],
'availability_zone': self.properties[self.AVAILABILITY_ZONE],
}
scheduler_hints = self._scheduler_hints(
self.properties[self.CINDER_SCHEDULER_HINTS])
if scheduler_hints:
arguments[self.CINDER_SCHEDULER_HINTS] = scheduler_hints
if self.properties[self.IMAGE]:
arguments['imageRef'] = self.client_plugin(
'glance').find_image_by_name_or_id(
self.properties[self.IMAGE])
elif self.properties[self.IMAGE_REF]:
arguments['imageRef'] = self.properties[self.IMAGE_REF]
optionals = (self.SNAPSHOT_ID, self.VOLUME_TYPE, self.SOURCE_VOLID,
self.METADATA, self.MULTI_ATTACH)
arguments.update((prop, self.properties[prop]) for prop in optionals
if self.properties[prop] is not None)
return arguments
def _resolve_attribute(self, name):
if self.resource_id is None:
return
cinder = self.client()
vol = cinder.volumes.get(self.resource_id)
if name == self.METADATA_ATTR:
return six.text_type(jsonutils.dumps(vol.metadata))
elif name == self.METADATA_VALUES_ATTR:
return vol.metadata
if name == self.DISPLAY_NAME_ATTR:
return vol.name
elif name == self.DISPLAY_DESCRIPTION_ATTR:
return vol.description
elif name == self.ATTACHMENTS_LIST:
return vol.attachments
return six.text_type(getattr(vol, name))
def check_create_complete(self, vol_id):
complete = super(CinderVolume, self).check_create_complete(vol_id)
        # Cinder only supports updating the read-only flag for volumes in the
        # 'available' state; if we updated it in handle_create() the volume
        # might still be 'creating', and Cinder would raise an exception.
if complete:
self._store_config_default_properties()
self._update_read_only(self.properties[self.READ_ONLY])
return complete
def _store_config_default_properties(self, attributes=None):
"""Method for storing default values of properties in resource data.
Some properties have default values, specified in project configuration
file, so cannot be hardcoded into properties_schema, but should be
stored for further using. So need to get created resource and take
required property's value.
"""
if attributes is None:
attributes = self._show_resource()
if attributes.get('volume_type') is not None:
self.data_set(self.VOLUME_TYPE, attributes['volume_type'])
else:
self.data_delete(self.VOLUME_TYPE)
def _extend_volume(self, new_size):
try:
self.client().volumes.extend(self.resource_id, new_size)
except Exception as ex:
if self.client_plugin().is_client_exception(ex):
raise exception.Error(_(
"Failed to extend volume %(vol)s - %(err)s") % {
'vol': self.resource_id, 'err': six.text_type(ex)})
else:
raise
return True
def _update_read_only(self, read_only_flag):
if read_only_flag is not None:
self.client().volumes.update_readonly_flag(self.resource_id,
read_only_flag)
return True
def _check_extend_volume_complete(self):
vol = self.client().volumes.get(self.resource_id)
if vol.status == 'extending':
LOG.debug("Volume %s is being extended", vol.id)
return False
if vol.status != 'available':
LOG.info("Resize failed: Volume %(vol)s "
"is in %(status)s state.",
{'vol': vol.id, 'status': vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume resize failed'))
LOG.info('Volume %(id)s resize complete', {'id': vol.id})
return True
def _backup_restore(self, vol_id, backup_id):
try:
self.client().restores.restore(backup_id, vol_id)
except Exception as ex:
if self.client_plugin().is_client_exception(ex):
raise exception.Error(_(
"Failed to restore volume %(vol)s from backup %(backup)s "
"- %(err)s") % {'vol': vol_id,
'backup': backup_id,
'err': ex})
else:
raise
return True
def _check_backup_restore_complete(self):
vol = self.client().volumes.get(self.resource_id)
if vol.status == 'restoring-backup':
LOG.debug("Volume %s is being restoring from backup", vol.id)
return False
if vol.status != 'available':
LOG.info("Restore failed: Volume %(vol)s is in %(status)s "
"state.", {'vol': vol.id, 'status': vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume backup restore failed'))
LOG.info('Volume %s backup restore complete', vol.id)
return True
def needs_replace_failed(self):
if not self.resource_id:
return True
with self.client_plugin().ignore_not_found:
vol = self.client().volumes.get(self.resource_id)
return vol.status in ('error', 'deleting')
return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
vol = None
cinder = self.client()
prg_resize = None
prg_attach = None
prg_detach = None
prg_restore = None
prg_access = None
# update the name and description for cinder volume
if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
vol = cinder.volumes.get(self.resource_id)
update_name = (prop_diff.get(self.NAME) or
self.properties[self.NAME])
update_description = (prop_diff.get(self.DESCRIPTION) or
self.properties[self.DESCRIPTION])
kwargs = self._fetch_name_and_description(update_name,
update_description)
cinder.volumes.update(vol, **kwargs)
# update the metadata for cinder volume
if self.METADATA in prop_diff:
if not vol:
vol = cinder.volumes.get(self.resource_id)
metadata = prop_diff.get(self.METADATA)
cinder.volumes.update_all_metadata(vol, metadata)
# retype
if self.VOLUME_TYPE in prop_diff:
if not vol:
vol = cinder.volumes.get(self.resource_id)
new_vol_type = prop_diff.get(self.VOLUME_TYPE)
cinder.volumes.retype(vol, new_vol_type, 'never')
# update read_only access mode
if self.READ_ONLY in prop_diff:
if not vol:
vol = cinder.volumes.get(self.resource_id)
flag = prop_diff.get(self.READ_ONLY)
prg_access = progress.VolumeUpdateAccessModeProgress(
read_only=flag)
prg_detach, prg_attach = self._detach_attach_progress(vol)
# restore the volume from backup
if self.BACKUP_ID in prop_diff:
prg_restore = progress.VolumeBackupRestoreProgress(
vol_id=self.resource_id,
backup_id=prop_diff.get(self.BACKUP_ID))
# extend volume size
if self.SIZE in prop_diff:
if not vol:
vol = cinder.volumes.get(self.resource_id)
new_size = prop_diff[self.SIZE]
if new_size < vol.size:
raise exception.NotSupported(feature=_("Shrinking volume"))
elif new_size > vol.size:
prg_resize = progress.VolumeResizeProgress(size=new_size)
prg_detach, prg_attach = self._detach_attach_progress(vol)
return prg_restore, prg_detach, prg_resize, prg_access, prg_attach
def _detach_attach_progress(self, vol):
prg_attach = None
prg_detach = None
if vol.attachments:
# NOTE(pshchelo):
# this relies on current behavior of cinder attachments,
# i.e. volume attachments is a list with len<=1,
# so the volume can be attached only to single instance,
# and id of attachment is the same as id of the volume
# it describes, so detach/attach the same volume
# will not change volume attachment id.
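            # Illustrative shape of the attachment record read below (device path hypothetical):
            # vol.attachments == [{'server_id': '<server uuid>', 'device': '/dev/vdb', 'id': '<volume id>'}]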
server_id = vol.attachments[0]['server_id']
device = vol.attachments[0]['device']
attachment_id = vol.attachments[0]['id']
prg_detach = progress.VolumeDetachProgress(
server_id, vol.id, attachment_id)
prg_attach = progress.VolumeAttachProgress(
server_id, vol.id, device)
return prg_detach, prg_attach
def _detach_volume_to_complete(self, prg_detach):
if not prg_detach.called:
self.client_plugin('nova').detach_volume(prg_detach.srv_id,
prg_detach.attach_id)
prg_detach.called = True
return False
if not prg_detach.cinder_complete:
cinder_complete_res = self.client_plugin(
).check_detach_volume_complete(prg_detach.vol_id)
prg_detach.cinder_complete = cinder_complete_res
return False
if not prg_detach.nova_complete:
prg_detach.nova_complete = self.client_plugin(
'nova').check_detach_volume_complete(prg_detach.srv_id,
prg_detach.attach_id)
return False
def _attach_volume_to_complete(self, prg_attach):
if not prg_attach.called:
prg_attach.called = self.client_plugin('nova').attach_volume(
prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
return False
if not prg_attach.complete:
prg_attach.complete = self.client_plugin(
).check_attach_volume_complete(prg_attach.vol_id)
return prg_attach.complete
def check_update_complete(self, checkers):
prg_restore, prg_detach, prg_resize, prg_access, prg_attach = checkers
if prg_restore:
if not prg_restore.called:
prg_restore.called = self._backup_restore(
prg_restore.vol_id,
prg_restore.backup_id)
return False
if not prg_restore.complete:
prg_restore.complete = self._check_backup_restore_complete()
return prg_restore.complete and not prg_resize
if not prg_resize and not prg_access:
return True
# detach volume
if prg_detach:
if not prg_detach.nova_complete:
self._detach_volume_to_complete(prg_detach)
return False
# resize volume
if prg_resize:
if not prg_resize.called:
prg_resize.called = self._extend_volume(prg_resize.size)
return False
if not prg_resize.complete:
prg_resize.complete = self._check_extend_volume_complete()
return prg_resize.complete and not prg_attach
# update read_only access mode
if prg_access:
if not prg_access.called:
prg_access.called = self._update_read_only(
prg_access.read_only)
return False
# reattach volume back
if prg_attach:
return self._attach_volume_to_complete(prg_attach)
return True
def handle_snapshot(self):
backup = self.client().backups.create(self.resource_id)
self.data_set('backup_id', backup.id)
return backup.id
def check_snapshot_complete(self, backup_id):
backup = self.client().backups.get(backup_id)
if backup.status == 'creating':
return False
if backup.status == 'available':
return True
raise exception.Error(backup.fail_reason)
def handle_delete_snapshot(self, snapshot):
backup_id = snapshot['resource_data'].get('backup_id')
if not backup_id:
return
try:
self.client().backups.delete(backup_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
return
else:
return backup_id
def check_delete_snapshot_complete(self, backup_id):
if not backup_id:
return True
try:
self.client().backups.get(backup_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
return True
else:
return False
def _build_exclusive_options(self):
exclusive_options = []
allow_no_size_options = []
if self.properties.get(self.SNAPSHOT_ID):
exclusive_options.append(self.SNAPSHOT_ID)
allow_no_size_options.append(self.SNAPSHOT_ID)
if self.properties.get(self.SOURCE_VOLID):
exclusive_options.append(self.SOURCE_VOLID)
allow_no_size_options.append(self.SOURCE_VOLID)
if self.properties.get(self.IMAGE):
exclusive_options.append(self.IMAGE)
if self.properties.get(self.IMAGE_REF):
exclusive_options.append(self.IMAGE_REF)
return exclusive_options, allow_no_size_options
def _validate_create_sources(self):
exclusive_options, allow_no_size_ops = self._build_exclusive_options()
size = self.properties.get(self.SIZE)
if (size is None and
(len(allow_no_size_ops) != 1 or len(exclusive_options) != 1)):
msg = (_('If neither "%(backup_id)s" nor "%(size)s" is '
'provided, one and only one of "%(source_vol)s", '
'"%(snapshot_id)s" must be specified, but currently '
'specified options: %(exclusive_options)s.')
% {'backup_id': self.BACKUP_ID,
'size': self.SIZE,
'source_vol': self.SOURCE_VOLID,
'snapshot_id': self.SNAPSHOT_ID,
'exclusive_options': exclusive_options})
raise exception.StackValidationFailed(message=msg)
elif size and len(exclusive_options) > 1:
msg = (_('If "%(size)s" is provided, only one of '
'"%(image)s", "%(image_ref)s", "%(source_vol)s", '
'"%(snapshot_id)s" can be specified, but currently '
'specified options: %(exclusive_options)s.')
% {'size': self.SIZE,
'image': self.IMAGE,
'image_ref': self.IMAGE_REF,
'source_vol': self.SOURCE_VOLID,
'snapshot_id': self.SNAPSHOT_ID,
'exclusive_options': exclusive_options})
raise exception.StackValidationFailed(message=msg)
def validate(self):
"""Validate provided params."""
res = super(CinderVolume, self).validate()
if res is not None:
return res
# can not specify both image and imageRef
image = self.properties.get(self.IMAGE)
imageRef = self.properties.get(self.IMAGE_REF)
if image and imageRef:
raise exception.ResourcePropertyConflict(self.IMAGE,
self.IMAGE_REF)
# if not create from backup, need to check other create sources
if not self.properties.get(self.BACKUP_ID):
self._validate_create_sources()
def handle_restore(self, defn, restore_data):
backup_id = restore_data['resource_data']['backup_id']
        # We can't ignore the 'size' property: if the user updated the volume
        # size after the snapshot, we need to change back to the old size when
        # restoring the volume.
ignore_props = (
self.IMAGE_REF, self.IMAGE, self.SOURCE_VOLID)
props = dict(
(key, value) for (key, value) in
six.iteritems(defn.properties(self.properties_schema))
if key not in ignore_props and value is not None)
props[self.BACKUP_ID] = backup_id
return defn.freeze(properties=props)
def parse_live_resource_data(self, resource_properties, resource_data):
volume_reality = {}
if (resource_data.get(self.METADATA) and
resource_data.get(self.METADATA).get(
self.READ_ONLY) is not None):
read_only = resource_data.get(self.METADATA).pop(self.READ_ONLY)
volume_reality.update({self.READ_ONLY: read_only})
old_vt = self.data().get(self.VOLUME_TYPE)
new_vt = resource_data.get(self.VOLUME_TYPE)
if old_vt != new_vt:
volume_reality.update({self.VOLUME_TYPE: new_vt})
self._store_config_default_properties(dict(volume_type=new_vt))
props_keys = [self.SIZE, self.NAME, self.DESCRIPTION,
self.METADATA, self.BACKUP_ID]
for key in props_keys:
volume_reality.update({key: resource_data.get(key)})
return volume_reality
class CinderVolumeAttachment(vb.BaseVolumeAttachment):
"""Resource for associating volume to instance.
Resource for associating existing volume to instance. Also, the location
where the volume is exposed on the instance can be specified.
"""
PROPERTIES = (
INSTANCE_ID, VOLUME_ID, DEVICE,
) = (
'instance_uuid', 'volume_id', 'mountpoint',
)
properties_schema = {
INSTANCE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the server to which the volume attaches.'),
required=True,
update_allowed=True
),
VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to be attached.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
DEVICE: properties.Schema(
properties.Schema.STRING,
_('The location where the volume is exposed on the instance. This '
'assignment may not be honored and it is advised that the path '
'/dev/disk/by-id/virtio-<VolumeId> be used instead.'),
update_allowed=True
),
}
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
prg_attach = None
prg_detach = None
if prop_diff:
# Even though some combinations of changed properties
# could be updated in UpdateReplace manner,
# we still first detach the old resource so that
# self.resource_id is not replaced prematurely
volume_id = self.properties[self.VOLUME_ID]
server_id = self.properties[self.INSTANCE_ID]
self.client_plugin('nova').detach_volume(server_id,
self.resource_id)
prg_detach = progress.VolumeDetachProgress(
server_id, volume_id, self.resource_id)
prg_detach.called = True
if self.VOLUME_ID in prop_diff:
volume_id = prop_diff.get(self.VOLUME_ID)
device = (self.properties[self.DEVICE]
if self.properties[self.DEVICE] else None)
if self.DEVICE in prop_diff:
device = (prop_diff[self.DEVICE]
if prop_diff[self.DEVICE] else None)
if self.INSTANCE_ID in prop_diff:
server_id = prop_diff.get(self.INSTANCE_ID)
prg_attach = progress.VolumeAttachProgress(
server_id, volume_id, device)
return prg_detach, prg_attach
def check_update_complete(self, checkers):
prg_detach, prg_attach = checkers
if not (prg_detach and prg_attach):
return True
if not prg_detach.cinder_complete:
prg_detach.cinder_complete = self.client_plugin(
).check_detach_volume_complete(prg_detach.vol_id)
return False
if not prg_detach.nova_complete:
prg_detach.nova_complete = self.client_plugin(
'nova').check_detach_volume_complete(prg_detach.srv_id,
self.resource_id)
return False
if not prg_attach.called:
prg_attach.called = self.client_plugin('nova').attach_volume(
prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
return False
if not prg_attach.complete:
prg_attach.complete = self.client_plugin(
).check_attach_volume_complete(prg_attach.vol_id)
if prg_attach.complete:
self.resource_id_set(prg_attach.called)
return prg_attach.complete
return True
def resource_mapping():
return {
'OS::Cinder::Volume': CinderVolume,
'OS::Cinder::VolumeAttachment': CinderVolumeAttachment,
}
|
py | b40b9ec9a093ff1af3e2adf33a28ec122b47bd99 | import json
import os
def show_words(sets):
    for word_set in sets:
        print("{:-^61}".format(os.path.basename(word_set)))
        with open(word_set, 'r') as fileobj:
            content = json.load(fileobj)
        for w, d in content.items():
            # Definitions may be stored as a list of strings; join them for display.
            if isinstance(d, list):
                d = ' '.join(d)
            print("{:^30} {:^30}".format(w, str(d)))
        print('\n')
sets = ["people.json", "items.json"]
show_words(sets) |
py | b40b9eede4727f07bf4e57a80bdf208a22657397 | #%%
# print("Hello world!")
#%%
# Question 1: Create a Markdown cell with the following:
# Two paragraphs about yourself. In one of the paragraphs, give a hyperlink of a website
# that you want us to see. Can be about yourself, or something you like.
#%%
# Question 2: Create
# a list of all the class titles that you are planning to take in the data science program.
# Have at least 6 classes, even if you are not a DS major
# Then print out the last entry in your list.
#%%
# Question 3: After you completed question 2, you feel Intro to data mining is too stupid, so you are going
# to replace it with Intro to Coal mining. Do that in python here.
#%%
# Question 4: Before you go see your academic advisor, you are
# asked to create a python dictionary of the classes you plan to take,
# with the course number as key. Please do that. Don't forget that your advisor
# probably doesn't like coal. And that coal mining class doesn't even have a
# course number.
#%%
# Question 5: print out and show your advisor how many
# classes (print out the number, not the list/dictionary) you plan
# to take.
#%%
# Question 6: Using loops
# Goal: print out the list of days (31) in Jan 2022 like this
# Sat - 2022/1/1
# Sun - 2022/1/2
# Mon - 2022/1/3
# Tue - 2022/1/4
# Wed - 2022/1/5
# Thu - 2022/1/6
# Fri - 2022/1/7
# Sat - 2022/1/8
# Sun - 2022/1/9
# Mon - 2022/1/10
# Tue - 2022/1/11
# Wed - 2022/1/12
# Thu - 2022/1/13
# ...
# You might find something like this useful, especially if you use the remainder property x%7
dayofweektuple = ('Sun','Mon','Tue','Wed','Thu','Fri','Sat') # day-of-week-tuple
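#%%
# A minimal sketch for Question 6 (one possible approach using the x%7 hint;
# the offset of 5 aligns day 1 of Jan 2022 with 'Sat'):
for day in range(1, 32):
    print(f"{dayofweektuple[(day + 5) % 7]} - 2022/1/{day}")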
# %%[markdown]
# # Additional Exercise:
# Choose three of the five exercises below to complete.
#%%
# =================================================================
# Class_Ex1:
# Write python codes that converts seconds, say 257364 seconds, to
# (x Hour, x min, x seconds)
# ----------------------------------------------------------------
#%%
# =================================================================
# Class_Ex2:
# Write a python codes to print all the different arrangements of the
# letters A, B, and C. Each string printed is a permutation of ABC.
# Hint: one way is to create three nested loops.
# ----------------------------------------------------------------
#%%
# =================================================================
# Class_Ex3:
# Write a python codes to print all the different arrangements of the
# letters A, B, C and D. Each string printed is a permutation of ABCD.
# ----------------------------------------------------------------
#%%
# =================================================================
# Class_Ex4:
# Suppose we wish to draw a triangular tree, and its height is provided
# by the user, like this, for a height of 5:
# *
# ***
# *****
# *******
# *********
# ----------------------------------------------------------------
#%%
# =================================================================
# Class_Ex5:
# Write python codes to print prime numbers up to a specified
# values, say up to 200.
# ----------------------------------------------------------------
# ================================================================= |
py | b40b9fa2e80beb4e275c5e9d7efe4f303fffa941 | from cla_public.app import create_app
app = create_app(config_file="config/docker.py")
|
py | b40ba0227006148ebbad1e31f26cf118a34ca5b5 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='adafruit-circuitpython-clue',
use_scm_version=True,
setup_requires=['setuptools_scm'],
description='A high level library representing all the features of the Adafruit CLUE.',
long_description=long_description,
long_description_content_type='text/x-rst',
# The project's main homepage.
url='https://github.com/adafruit/Adafruit_CircuitPython_CLUE',
# Author details
author='Adafruit Industries',
author_email='[email protected]',
install_requires=[
'Adafruit-Blinka',
'adafruit-circuitpython-busdevice',
'adafruit-circuitpython-register',
'adafruit-circuitpython-neopixel',
'adafruit-circuitpython-sht31d',
'adafruit-circuitpython-lsm6ds',
'adafruit-circuitpython-lis3mdl',
'adafruit-circuitpython-display-text',
'adafruit-circuitpython-bmp280',
'adafruit-circuitpython-apds9960'
],
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Hardware',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='adafruit blinka circuitpython micropython clue sensor humidity temperature '
'pressure altitude color proximity gesture light sensors gyro acceleration sound',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# TODO: IF LIBRARY FILES ARE A PACKAGE FOLDER,
# CHANGE `py_modules=['...']` TO `packages=['...']`
py_modules=['adafruit_clue'],
)
|
py | b40ba12cdc179214eab889e6180eb1babf95a890 | # sandbox for playing with time confs
from datetime import datetime
from datetime import timezone
SCHEDULE = {
"Intro to Python": {
"start_time": datetime(2020, 11, 5, 12, tzinfo=timezone.utc),
"duration": "1 hour",
"attended": False
},
"Intermediate Python": {
"start_time": datetime(2020, 11, 5, 14, tzinfo=timezone.utc),
"duration": "1 hour",
"attended": False
}
}
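# The subtraction below evaluates to a datetime.timedelta of 2:00:00 (14:00 - 12:00 UTC).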
print(SCHEDULE['Intermediate Python']['start_time'] - SCHEDULE['Intro to Python']['start_time']) |
py | b40ba2f0e048e24880780242bf0ac5f77c531e04 | from flask import jsonify, session, request
from flask_restx import Resource, inputs
from flask_babel import gettext
from opentera.db.models.TeraSession import TeraSession
from opentera.db.models.TeraParticipant import TeraParticipant
from modules.DatabaseModule.DBManager import DBManager
from modules.LoginModule.LoginModule import LoginModule
from sqlalchemy import exc
from modules.FlaskModule.FlaskModule import device_api_ns as api
from opentera.db.models.TeraDevice import TeraDevice
import datetime
# Parser definition(s)
get_parser = api.parser()
get_parser.add_argument('token', type=str, help='Secret Token')
get_parser.add_argument('id_session', type=int, help='Session ID')
get_parser.add_argument('list', type=inputs.boolean, help='List all sessions')
post_parser = api.parser()
post_parser.add_argument('token', type=str, help='Secret Token')
post_parser.add_argument('session', type=str, location='json', help='Session to create / update', required=True)
session_schema = api.schema_model('device_session', {
'properties': {
'session': {
'type': 'object',
'properties': {
'id_session': {
'type': 'integer'
},
'session_participants': {
'type': 'array',
'uniqueItems': True,
'items': {
'type': 'string',
'format': 'uuid'
}
},
'id_session_type': {
'type': 'integer'
},
'session_name': {
'type': 'string'
},
'session_status': {
'type': 'integer'
},
'session_start_datetime': {
'type': 'string'
}
},
'required': ['id_session', 'session_participants',
'id_session_type', 'session_name', 'session_status', 'session_start_datetime']
},
},
'type': 'object',
'required': ['session']
})
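# Illustrative request body matching the schema above (all values hypothetical):
# {
#     "session": {
#         "id_session": 0,
#         "session_participants": ["b7d7e7c2-1111-2222-3333-444455556666"],
#         "id_session_type": 1,
#         "session_name": "Device session",
#         "session_status": 0,
#         "session_start_datetime": "2021-01-01T12:00:00"
#     }
# }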
class DeviceQuerySessions(Resource):
def __init__(self, _api, flaskModule=None):
Resource.__init__(self, _api)
self.module = flaskModule
@LoginModule.device_token_or_certificate_required
@api.expect(get_parser)
@api.doc(description='Get session',
responses={403: 'Forbidden for security reasons.'})
def get(self):
# current_device = TeraDevice.get_device_by_uuid(session['_user_id'])
# device_access = DBManager.deviceAccess(current_device)
# args = get_parser.parse_args(strict=True)
#
# # Get all sessions
# sessions = device_access.get_accessible_sessions()
#
# # Can't query sessions, unless we have a parameter!
# if not any(args.values()):
# return '', 400
#
# elif args['id_session']:
# sessions = device_access.query_session(session_id=args['id_session'])
# try:
# sessions_list = []
# for ses in sessions:
# if args['list'] is None:
# session_json = ses.to_json()
# sessions_list.append(session_json)
# else:
# session_json = ses.to_json(minimal=True)
# sessions_list.append(session_json)
#
# return sessions_list
#
# except InvalidRequestError:
# return '', 500
return gettext('Forbidden for security reasons'), 403
@LoginModule.device_token_or_certificate_required
@api.expect(session_schema, validate=True)
@api.doc(description='Update/Create session',
responses={200: 'Success',
400: 'Required parameter is missing',
500: 'Internal server error',
501: 'Not implemented',
403: 'Logged device doesn\'t have permission to access the requested data'})
def post(self):
current_device = TeraDevice.get_device_by_uuid(session['_user_id'])
# current_device = TeraDevice.get_device_by_id(4) # For tests only
args = post_parser.parse_args()
# Using request.json instead of parser, since parser messes up the json!
if 'session' not in request.json:
return gettext('Missing arguments'), 400
json_session = request.json['session']
device_access = DBManager.deviceAccess(current_device)
# Validate if we have an id
if 'id_session' not in json_session:
return gettext('Missing arguments'), 400
# Validate if we have an id
if 'id_session_type' not in json_session:
return gettext('Missing arguments'), 400
# Validate that we have session participants or users for new sessions
if ('session_participants' not in json_session and 'session_users' not in json_session) \
and json_session['id_session'] == 0:
return gettext('Missing arguments'), 400
# We know we have a device
# Avoid identity thief
json_session['id_creator_device'] = current_device.id_device
# Validate session type
session_types = device_access.get_accessible_session_types_ids()
if not json_session['id_session_type'] in session_types:
return gettext('Unauthorized'), 403
# Check if a session of that type and name already exists. If so, don't create it, just returns it.
if json_session['id_session'] == 0:
if 'session_name' not in json_session:
return gettext('Missing argument \'session name\''), 400
if 'session_start_datetime' not in json_session:
return gettext('Missing argument \'session_start_datetime\''), 400
existing_session = device_access.query_existing_session(session_name=json_session['session_name'],
session_type_id=json_session['id_session_type'],
session_date=datetime.datetime.fromisoformat(
json_session['session_start_datetime']),
participant_uuids=
json_session['session_participants']
)
if existing_session:
json_session['id_session'] = existing_session.id_session
# Don't change session start datetime
json_session['session_start_datetime'] = existing_session.session_start_datetime.isoformat()
else:
# Existing session - check if we can access it
if json_session['id_session'] not in device_access.get_accessible_sessions_ids():
                return gettext('Unauthorized'), 403
# Do the update!
if json_session['id_session'] > 0:
# Already existing
# TODO handle participant list (remove, add) in session
try:
if 'session_participants' in json_session:
participants = json_session.pop('session_participants')
# print('removing participants', participants)
TeraSession.update(json_session['id_session'], json_session)
except exc.SQLAlchemyError as e:
import sys
print(sys.exc_info())
self.module.logger.log_error(self.module.module_name,
DeviceQuerySessions.__name__,
'post', 500, 'Database error', str(e))
return gettext('Database error'), 500
else:
# New
try:
new_ses = TeraSession()
participants = json_session.pop('session_participants')
new_ses.from_json(json_session)
TeraSession.insert(new_ses)
for p_uuid in participants:
participant = TeraParticipant.get_participant_by_uuid(p_uuid)
new_ses.session_participants.append(participant)
if len(participants) > 0:
new_ses.commit() # Commits added participants
# Update ID for further use
json_session['id_session'] = new_ses.id_session
except exc.SQLAlchemyError as e:
import sys
print(sys.exc_info(), e)
self.module.logger.log_error(self.module.module_name,
DeviceQuerySessions.__name__,
'post', 500, 'Database error', str(e))
return '', 500
update_session = TeraSession.get_session_by_id(json_session['id_session'])
return jsonify(update_session.to_json())
@LoginModule.device_token_or_certificate_required
def delete(self):
return gettext('Forbidden for security reasons'), 403
|
py | b40ba3b2dc922c9dac23f1bd5f9076ebce76c6ef | """Platform for Baby Buddy binary switch integration."""
from __future__ import annotations
import logging
from datetime import datetime, time
from typing import Any
import homeassistant.util.dt as dt_util
import voluptuous as vol
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ID, ATTR_NAME, CONF_HOST
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import BabyBuddyCoordinator
from .client import get_datetime_from_time
from .const import (
ATTR_ACTIVE,
ATTR_AMOUNT,
ATTR_CHILD,
ATTR_END,
ATTR_FEEDINGS,
ATTR_FIRST_NAME,
ATTR_LAST_NAME,
ATTR_METHOD,
ATTR_MILESTONE,
ATTR_NOTES,
ATTR_SLEEP,
ATTR_START,
ATTR_TIMER,
ATTR_TIMERS,
ATTR_TUMMY_TIMES,
ATTR_TYPE,
DOMAIN,
FEEDING_METHODS,
FEEDING_TYPES,
)
from .errors import ValidationError
_LOGGER = logging.getLogger(__name__)
COMMON_FIELDS = {
vol.Optional(ATTR_TIMER, default=False): cv.boolean,
vol.Optional(ATTR_START): vol.Any(cv.datetime, cv.time),
vol.Optional(ATTR_END): vol.Any(cv.datetime, cv.time),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the babybuddy switches."""
babybuddy_coordinator: BabyBuddyCoordinator = hass.data[DOMAIN][
config_entry.entry_id
]
tracked: dict = {}
@callback
def update_entities() -> None:
"""Update the status of entities."""
update_items(babybuddy_coordinator, tracked, async_add_entities)
config_entry.async_on_unload(
babybuddy_coordinator.async_add_listener(update_entities)
)
update_entities()
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
"start_timer",
{
vol.Optional(ATTR_START): vol.Any(cv.datetime, cv.time),
vol.Optional(ATTR_NAME): cv.string,
},
"async_start_timer",
)
platform.async_register_entity_service(
"add_feeding",
{
vol.Required(ATTR_TYPE): vol.In(FEEDING_TYPES),
vol.Required(ATTR_METHOD): vol.In(FEEDING_METHODS),
**COMMON_FIELDS,
vol.Optional(ATTR_AMOUNT): cv.positive_float,
vol.Optional(ATTR_NOTES): cv.string,
},
"async_add_feeding",
)
platform.async_register_entity_service(
"add_sleep",
{
**COMMON_FIELDS,
vol.Optional(ATTR_NOTES): cv.string,
},
"async_add_sleep",
)
platform.async_register_entity_service(
"add_tummy_time",
{
**COMMON_FIELDS,
vol.Optional(ATTR_MILESTONE): cv.string,
},
"async_add_tummy_time",
)
@callback
def update_items(
coordinator: BabyBuddyCoordinator,
tracked: dict,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add timer switches to new child."""
new_entities = []
if coordinator.data is not None:
for child in coordinator.data[0]:
if child[ATTR_ID] not in tracked:
tracked[child[ATTR_ID]] = BabyBuddyChildTimerSwitch(coordinator, child)
new_entities.append(tracked[child[ATTR_ID]])
if new_entities:
async_add_entities(new_entities)
class BabyBuddyChildTimerSwitch(CoordinatorEntity, SwitchEntity):
"""Representation of a babybuddy timer switch."""
coordinator: BabyBuddyCoordinator
def __init__(
self,
coordinator: BabyBuddyCoordinator,
child: dict,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator)
self.child = child
self._attr_name = (
f"{self.child[ATTR_FIRST_NAME]} {self.child[ATTR_LAST_NAME]} {ATTR_TIMER}"
)
self._attr_unique_id = f"{self.coordinator.config_entry.data[CONF_HOST]}-{child[ATTR_ID]}-{ATTR_TIMER}"
self._attr_icon = "mdi:timer-sand"
self._attr_device_info = {
"identifiers": {(DOMAIN, child[ATTR_ID])},
"default_name": f"{child[ATTR_FIRST_NAME]} {child[ATTR_LAST_NAME]}",
}
@property
def is_on(self) -> bool:
"""Return entity state."""
is_on = False
if self.child[ATTR_ID] in self.coordinator.data[1]:
is_on = self.coordinator.data[1][self.child[ATTR_ID]][ATTR_TIMERS].get(
ATTR_ACTIVE, False
)
return is_on
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return entity specific state attributes for babybuddy."""
attrs: dict[str, Any] = {}
if self.is_on:
attrs = self.coordinator.data[1][self.child[ATTR_ID]].get(ATTR_TIMERS)
return attrs
async def async_turn_on(self, **kwargs: Any) -> None:
"""Start a new timer."""
await self.async_start_timer()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Delete active timer."""
timer_id = self.extra_state_attributes[ATTR_ID]
await self.coordinator.client.async_delete(ATTR_TIMERS, timer_id)
await self.coordinator.async_request_refresh()
async def async_start_timer(
self, start: datetime | time | None = None, name: str | None = None
) -> None:
"""Start a new timer for child."""
data: dict[str, Any] = {ATTR_CHILD: self.child[ATTR_ID]}
try:
data[ATTR_START] = get_datetime_from_time(start or dt_util.now())
except ValidationError as err:
_LOGGER.error(err)
return
if name:
data[ATTR_NAME] = name
await self.coordinator.client.async_post(ATTR_TIMERS, data)
await self.coordinator.async_request_refresh()
async def async_add_feeding(
self,
type: str,
method: str,
timer: bool,
start: datetime | time | None = None,
end: datetime | time | None = None,
amount: int | None = None,
notes: str | None = None,
) -> None:
"""Add a feeding entry."""
try:
data = self.set_common_fields(timer, start, end)
except ValidationError as err:
_LOGGER.error(err)
return
data.update(
{
ATTR_TYPE: type.lower(),
ATTR_METHOD: method.lower(),
}
)
if amount:
data[ATTR_AMOUNT] = amount
if notes:
data[ATTR_NOTES] = notes
await self.coordinator.client.async_post(ATTR_FEEDINGS, data)
await self.coordinator.async_request_refresh()
async def async_add_sleep(
self,
timer: bool,
start: datetime | time | None = None,
end: datetime | time | None = None,
notes: str | None = None,
) -> None:
"""Add a sleep entry."""
try:
data = self.set_common_fields(timer, start, end)
except ValidationError as err:
_LOGGER.error(err)
return
if notes:
data[ATTR_NOTES] = notes
await self.coordinator.client.async_post(ATTR_SLEEP, data)
await self.coordinator.async_request_refresh()
async def async_add_tummy_time(
self,
timer: bool,
start: datetime | time | None = None,
end: datetime | time | None = None,
milestone: str | None = None,
) -> None:
"""Add a tummy time entry."""
try:
data = self.set_common_fields(timer, start, end)
except ValidationError as err:
_LOGGER.error(err)
return
if milestone:
data[ATTR_MILESTONE] = milestone
await self.coordinator.client.async_post(ATTR_TUMMY_TIMES, data)
await self.coordinator.async_request_refresh()
def set_common_fields(
self,
timer: bool,
start: datetime | time | None = None,
end: datetime | time | None = None,
) -> dict[str, Any]:
"""Set data common fields."""
data: dict[str, Any] = {}
if timer:
if not self.is_on:
raise ValidationError(
"Timer not found or stopped. Timer must be active."
)
data[ATTR_TIMER] = self.extra_state_attributes[ATTR_ID]
else:
data[ATTR_CHILD] = self.child[ATTR_ID]
data[ATTR_START] = get_datetime_from_time(start or dt_util.now())
data[ATTR_END] = get_datetime_from_time(end or dt_util.now())
return data
|
py | b40ba4c758093595dbdbdf52c001f0dcd622df38 | import factory
from factory.django import DjangoModelFactory
class TicketTimeEntryFactory(DjangoModelFactory):
class Meta:
model = 'tickets.TicketTimeEntry'
ticket = factory.SubFactory('apps.tickets.factories.TicketFactory')
employee = factory.SubFactory('apps.tickets.factories.EmployeeFactory')
note = factory.Faker('sentence', nb_words=8)
date_from = factory.Faker('date_time')
date_to = factory.Faker('date_time')
@classmethod
def build_dict(cls, **extra_fields):
data = factory.build(dict, FACTORY_CLASS=cls)
data.pop('employee', None)
data.pop('ticket', None)
data.update(**extra_fields)
return data
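# Illustrative usage (hypothetical field value): build a plain dict payload with the
# related objects stripped out, e.g.
# TicketTimeEntryFactory.build_dict(note="Investigated the reported outage")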
|
py | b40ba4d2d9bcfea1e0df93278d4a79f07ce39368 | def foo():
"famous foo function"
return 1
def bar():
"famous bar function"
if foo():
return "bar"
    else:
return "glork"
|
py | b40ba4f9eaa0a6d049762f19aa9f2d7848b7a951 | # encoding: utf-8
# module Tekla.Structures.Drawing.UI calls itself UI
# from Tekla.Structures.Drawing, Version=2017.0.0.0, Culture=neutral, PublicKeyToken=2f04dbe497b71114
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class DrawingObjectSelector(object):
# no doc
def GetSelected(self):
""" GetSelected(self: DrawingObjectSelector) -> DrawingObjectEnumerator """
pass
def SelectObject(self, DrawingObject):
""" SelectObject(self: DrawingObjectSelector, DrawingObject: DrawingObject) -> bool """
pass
def SelectObjects(self, DrawingObjects, ExtendSelection):
""" SelectObjects(self: DrawingObjectSelector, DrawingObjects: ArrayList, ExtendSelection: bool) -> bool """
pass
def UnselectAllObjects(self):
""" UnselectAllObjects(self: DrawingObjectSelector) -> bool """
pass
def UnselectObject(self, DrawingObject):
""" UnselectObject(self: DrawingObjectSelector, DrawingObject: DrawingObject) -> bool """
pass
def UnselectObjects(self, DrawingObjects):
""" UnselectObjects(self: DrawingObjectSelector, DrawingObjects: ArrayList) -> bool """
pass
class DrawingSelector(object):
# no doc
def GetSelected(self):
""" GetSelected(self: DrawingSelector) -> DrawingEnumerator """
pass
class Events(MarshalByRefObject):
""" Events() """
def InitializeLifetimeService(self):
""" InitializeLifetimeService(self: Events) -> object """
pass
def OnDrawingEditorClose(self, EventName, Parameters):
""" OnDrawingEditorClose(self: Events, EventName: str, *Parameters: Array[object]) """
pass
def OnDrawingEditorOpen(self, EventName, Parameters):
""" OnDrawingEditorOpen(self: Events, EventName: str, *Parameters: Array[object]) """
pass
def OnDrawingListSelectionChanged(self, EventName, Parameters):
""" OnDrawingListSelectionChanged(self: Events, EventName: str, *Parameters: Array[object]) """
pass
def OnDrawingLoaded(self, EventName, Parameters):
""" OnDrawingLoaded(self: Events, EventName: str, *Parameters: Array[object]) """
pass
def OnSelectionChange(self, EventName, Parameters):
""" OnSelectionChange(self: Events, EventName: str, *Parameters: Array[object]) """
pass
def Register(self):
""" Register(self: Events) """
pass
def UnRegister(self):
""" UnRegister(self: Events) """
pass
DrawingEditorClosed = None
DrawingEditorClosedDelegate = None
DrawingEditorOpened = None
DrawingEditorOpenedDelegate = None
DrawingListSelectionChanged = None
DrawingListSelectionChangedDelegate = None
DrawingLoaded = None
DrawingLoadedDelegate = None
SelectionChange = None
SelectionChangeDelegate = None
class Picker(object):
# no doc
def IsInteractive(self):
""" IsInteractive(self: Picker) -> bool """
pass
def PickObject(self, prompt, *__args):
"""
PickObject(self: Picker, prompt: str) -> (DrawingObject, ViewBase, Point)
PickObject(self: Picker, prompt: str, typeFilter: Array[Type]) -> (DrawingObject, ViewBase, Point)
PickObject(self: Picker, prompt: str) -> (DrawingObject, ViewBase)
PickObject(self: Picker, prompt: str) -> Tuple[DrawingObject, ViewBase]
"""
pass
def PickObjectAndPoint(self, prompt):
""" PickObjectAndPoint(self: Picker, prompt: str) -> Tuple[DrawingObject, ViewBase, Point] """
pass
def PickPoint(self, prompt, pickedPoint=None, pickedView=None):
"""
PickPoint(self: Picker, prompt: str) -> Tuple[Point, ViewBase]
PickPoint(self: Picker, prompt: str) -> (Point, ViewBase)
"""
pass
def PickPoints(self, *__args):
"""
PickPoints(self: Picker, prompts: StringList) -> (PointList, ViewBase)
PickPoints(self: Picker, prompts: StringList) -> Tuple[PointList, ViewBase]
PickPoints(self: Picker, numberOfPicks: int, prompts: StringList) -> (PointList, ViewBase)
PickPoints(self: Picker, numberOfPicks: int, prompts: StringList) -> Tuple[PointList, ViewBase]
"""
pass
def PickThreePoints(self, firstPrompt, secondPrompt, thirdPrompt, firstPickedPoint, secondPickedPoint, thirdPickedPoint, pickedView):
""" PickThreePoints(self: Picker, firstPrompt: str, secondPrompt: str, thirdPrompt: str) -> (Point, Point, Point, ViewBase) """
pass
def PickTwoPoints(self, firstPrompt, secondPrompt, firstPickedPoint, secondPickedPoint, pickedView):
""" PickTwoPoints(self: Picker, firstPrompt: str, secondPrompt: str) -> (Point, Point, ViewBase) """
pass
|
py | b40ba54c919f18fe30359ff4d618d2e180b605e2 | data = (
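# One transliteration per code point in this 256-entry block; entries are indexed
# 0x00-0xff as noted in the trailing comments.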
'Wei ', # 0x00
'Bai ', # 0x01
'Chen ', # 0x02
'Zhuan ', # 0x03
'Zhi ', # 0x04
'Zhui ', # 0x05
'Biao ', # 0x06
'Yun ', # 0x07
'Zeng ', # 0x08
'Tan ', # 0x09
'Zan ', # 0x0a
'Yan ', # 0x0b
'[?] ', # 0x0c
'Shan ', # 0x0d
'Wan ', # 0x0e
'Ying ', # 0x0f
'Jin ', # 0x10
'Gan ', # 0x11
'Xian ', # 0x12
'Zang ', # 0x13
'Bi ', # 0x14
'Du ', # 0x15
'Shu ', # 0x16
'Yan ', # 0x17
'[?] ', # 0x18
'Xuan ', # 0x19
'Long ', # 0x1a
'Gan ', # 0x1b
'Zang ', # 0x1c
'Bei ', # 0x1d
'Zhen ', # 0x1e
'Fu ', # 0x1f
'Yuan ', # 0x20
'Gong ', # 0x21
'Cai ', # 0x22
'Ze ', # 0x23
'Xian ', # 0x24
'Bai ', # 0x25
'Zhang ', # 0x26
'Huo ', # 0x27
'Zhi ', # 0x28
'Fan ', # 0x29
'Tan ', # 0x2a
'Pin ', # 0x2b
'Bian ', # 0x2c
'Gou ', # 0x2d
'Zhu ', # 0x2e
'Guan ', # 0x2f
'Er ', # 0x30
'Jian ', # 0x31
'Bi ', # 0x32
'Shi ', # 0x33
'Tie ', # 0x34
'Gui ', # 0x35
'Kuang ', # 0x36
'Dai ', # 0x37
'Mao ', # 0x38
'Fei ', # 0x39
'He ', # 0x3a
'Yi ', # 0x3b
'Zei ', # 0x3c
'Zhi ', # 0x3d
'Jia ', # 0x3e
'Hui ', # 0x3f
'Zi ', # 0x40
'Ren ', # 0x41
'Lu ', # 0x42
'Zang ', # 0x43
'Zi ', # 0x44
'Gai ', # 0x45
'Jin ', # 0x46
'Qiu ', # 0x47
'Zhen ', # 0x48
'Lai ', # 0x49
'She ', # 0x4a
'Fu ', # 0x4b
'Du ', # 0x4c
'Ji ', # 0x4d
'Shu ', # 0x4e
'Shang ', # 0x4f
'Si ', # 0x50
'Bi ', # 0x51
'Zhou ', # 0x52
'Geng ', # 0x53
'Pei ', # 0x54
'Tan ', # 0x55
'Lai ', # 0x56
'Feng ', # 0x57
'Zhui ', # 0x58
'Fu ', # 0x59
'Zhuan ', # 0x5a
'Sai ', # 0x5b
'Ze ', # 0x5c
'Yan ', # 0x5d
'Zan ', # 0x5e
'Yun ', # 0x5f
'Zeng ', # 0x60
'Shan ', # 0x61
'Ying ', # 0x62
'Gan ', # 0x63
'Chi ', # 0x64
'Xi ', # 0x65
'She ', # 0x66
'Nan ', # 0x67
'Xiong ', # 0x68
'Xi ', # 0x69
'Cheng ', # 0x6a
'He ', # 0x6b
'Cheng ', # 0x6c
'Zhe ', # 0x6d
'Xia ', # 0x6e
'Tang ', # 0x6f
'Zou ', # 0x70
'Zou ', # 0x71
'Li ', # 0x72
'Jiu ', # 0x73
'Fu ', # 0x74
'Zhao ', # 0x75
'Gan ', # 0x76
'Qi ', # 0x77
'Shan ', # 0x78
'Qiong ', # 0x79
'Qin ', # 0x7a
'Xian ', # 0x7b
'Ci ', # 0x7c
'Jue ', # 0x7d
'Qin ', # 0x7e
'Chi ', # 0x7f
'Ci ', # 0x80
'Chen ', # 0x81
'Chen ', # 0x82
'Die ', # 0x83
'Ju ', # 0x84
'Chao ', # 0x85
'Di ', # 0x86
'Se ', # 0x87
'Zhan ', # 0x88
'Zhu ', # 0x89
'Yue ', # 0x8a
'Qu ', # 0x8b
'Jie ', # 0x8c
'Chi ', # 0x8d
'Chu ', # 0x8e
'Gua ', # 0x8f
'Xue ', # 0x90
'Ci ', # 0x91
'Tiao ', # 0x92
'Duo ', # 0x93
'Lie ', # 0x94
'Gan ', # 0x95
'Suo ', # 0x96
'Cu ', # 0x97
'Xi ', # 0x98
'Zhao ', # 0x99
'Su ', # 0x9a
'Yin ', # 0x9b
'Ju ', # 0x9c
'Jian ', # 0x9d
'Que ', # 0x9e
'Tang ', # 0x9f
'Chuo ', # 0xa0
'Cui ', # 0xa1
'Lu ', # 0xa2
'Qu ', # 0xa3
'Dang ', # 0xa4
'Qiu ', # 0xa5
'Zi ', # 0xa6
'Ti ', # 0xa7
'Qu ', # 0xa8
'Chi ', # 0xa9
'Huang ', # 0xaa
'Qiao ', # 0xab
'Qiao ', # 0xac
'Yao ', # 0xad
'Zao ', # 0xae
'Ti ', # 0xaf
'[?] ', # 0xb0
'Zan ', # 0xb1
'Zan ', # 0xb2
'Zu ', # 0xb3
'Pa ', # 0xb4
'Bao ', # 0xb5
'Ku ', # 0xb6
'Ke ', # 0xb7
'Dun ', # 0xb8
'Jue ', # 0xb9
'Fu ', # 0xba
'Chen ', # 0xbb
'Jian ', # 0xbc
'Fang ', # 0xbd
'Zhi ', # 0xbe
'Sa ', # 0xbf
'Yue ', # 0xc0
'Pa ', # 0xc1
'Qi ', # 0xc2
'Yue ', # 0xc3
'Qiang ', # 0xc4
'Tuo ', # 0xc5
'Tai ', # 0xc6
'Yi ', # 0xc7
'Nian ', # 0xc8
'Ling ', # 0xc9
'Mei ', # 0xca
'Ba ', # 0xcb
'Die ', # 0xcc
'Ku ', # 0xcd
'Tuo ', # 0xce
'Jia ', # 0xcf
'Ci ', # 0xd0
'Pao ', # 0xd1
'Qia ', # 0xd2
'Zhu ', # 0xd3
'Ju ', # 0xd4
'Die ', # 0xd5
'Zhi ', # 0xd6
'Fu ', # 0xd7
'Pan ', # 0xd8
'Ju ', # 0xd9
'Shan ', # 0xda
'Bo ', # 0xdb
'Ni ', # 0xdc
'Ju ', # 0xdd
'Li ', # 0xde
'Gen ', # 0xdf
'Yi ', # 0xe0
'Ji ', # 0xe1
'Dai ', # 0xe2
'Xian ', # 0xe3
'Jiao ', # 0xe4
'Duo ', # 0xe5
'Zhu ', # 0xe6
'Zhuan ', # 0xe7
'Kua ', # 0xe8
'Zhuai ', # 0xe9
'Gui ', # 0xea
'Qiong ', # 0xeb
'Kui ', # 0xec
'Xiang ', # 0xed
'Chi ', # 0xee
'Lu ', # 0xef
'Beng ', # 0xf0
'Zhi ', # 0xf1
'Jia ', # 0xf2
'Tiao ', # 0xf3
'Cai ', # 0xf4
'Jian ', # 0xf5
'Ta ', # 0xf6
'Qiao ', # 0xf7
'Bi ', # 0xf8
'Xian ', # 0xf9
'Duo ', # 0xfa
'Ji ', # 0xfb
'Ju ', # 0xfc
'Ji ', # 0xfd
'Shu ', # 0xfe
'Tu ', # 0xff
)
|
py | b40ba667737c0f501b7f4c7546ab0ad9ee842603 | from cereal import car
from selfdrive.car import dbc_dict
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
VisualAlert = car.CarControl.HUDControl.VisualAlert
# Car button codes
class CruiseButtons:
RES_ACCEL = 4
DECEL_SET = 3
CANCEL = 2
MAIN = 1
#car chimes: enumeration from dbc file. Chimes are for alerts and warnings
class CM:
MUTE = 0
SINGLE = 3
DOUBLE = 4
REPEATED = 1
CONTINUOUS = 2
#car beeps: enumeration from dbc file. Beeps are for engage and disengage
class BP:
MUTE = 0
SINGLE = 3
TRIPLE = 2
REPEATED = 1
AUDIO_HUD = {
AudibleAlert.none: (BP.MUTE, CM.MUTE),
AudibleAlert.chimeEngage: (BP.SINGLE, CM.MUTE),
AudibleAlert.chimeDisengage: (BP.SINGLE, CM.MUTE),
AudibleAlert.chimeError: (BP.MUTE, CM.DOUBLE),
AudibleAlert.chimePrompt: (BP.MUTE, CM.SINGLE),
AudibleAlert.chimeWarning1: (BP.MUTE, CM.DOUBLE),
AudibleAlert.chimeWarning2: (BP.MUTE, CM.REPEATED),
AudibleAlert.chimeWarningRepeat: (BP.MUTE, CM.REPEATED)}
class AH:
#[alert_idx, value]
  # See dbc files for info on values
NONE = [0, 0]
FCW = [1, 1]
STEER = [2, 1]
BRAKE_PRESSED = [3, 10]
GEAR_NOT_D = [4, 6]
SEATBELT = [5, 5]
SPEED_TOO_HIGH = [6, 8]
VISUAL_HUD = {
VisualAlert.none: AH.NONE,
VisualAlert.fcw: AH.FCW,
VisualAlert.steerRequired: AH.STEER,
VisualAlert.brakePressed: AH.BRAKE_PRESSED,
VisualAlert.wrongGear: AH.GEAR_NOT_D,
VisualAlert.seatbeltUnbuckled: AH.SEATBELT,
VisualAlert.speedTooHigh: AH.SPEED_TOO_HIGH}
class CAR:
ACCORD = "HONDA ACCORD 2018 SPORT 2T"
ACCORD_15 = "HONDA ACCORD 2018 LX 1.5T"
ACCORDH = "HONDA ACCORD 2018 HYBRID TOURING"
CIVIC = "HONDA CIVIC 2016 TOURING"
CIVIC_BOSCH = "HONDA CIVIC HATCHBACK 2017 SEDAN/COUPE 2019"
ACURA_ILX = "ACURA ILX 2016 ACURAWATCH PLUS"
CRV = "HONDA CR-V 2016 TOURING"
CRV_5G = "HONDA CR-V 2017 EX"
CRV_HYBRID = "HONDA CR-V 2019 HYBRID"
ODYSSEY = "HONDA ODYSSEY 2018 EX-L"
ODYSSEY_CHN = "HONDA ODYSSEY 2019 EXCLUSIVE CHN"
ACURA_RDX = "ACURA RDX 2018 ACURAWATCH PLUS"
PILOT = "HONDA PILOT 2017 TOURING"
PILOT_2019 = "HONDA PILOT 2019 ELITE"
RIDGELINE = "HONDA RIDGELINE 2017 BLACK EDITION"
FINGERPRINTS = {
CAR.ACCORD: [{
148: 8, 228: 5, 304: 8, 330: 8, 344: 8, 380: 8, 399: 7, 419: 8, 420: 8, 427: 3, 432: 7, 441: 5, 446: 3, 450: 8, 464: 8, 477: 8, 479: 8, 495: 8, 545: 6, 662: 4, 773: 7, 777: 8, 780: 8, 804: 8, 806: 8, 808: 8, 829: 5, 862: 8, 884: 8, 891: 8, 927: 8, 929: 8, 1302: 8, 1600: 5, 1601: 8, 1652: 8
}],
CAR.ACCORD_15: [{
148: 8, 228: 5, 304: 8, 330: 8, 344: 8, 380: 8, 399: 7, 401: 8, 420: 8, 427: 3, 432: 7, 441: 5, 446: 3, 450: 8, 464: 8, 477: 8, 479: 8, 495: 8, 545: 6, 662: 4, 773: 7, 777: 8, 780: 8, 804: 8, 806: 8, 808: 8, 829: 5, 862: 8, 884: 8, 891: 8, 927: 8, 929: 8, 1302: 8, 1600: 5, 1601: 8, 1652: 8
}],
CAR.ACCORDH: [{
148: 8, 228: 5, 304: 8, 330: 8, 344: 8, 380: 8, 387: 8, 388: 8, 399: 7, 419: 8, 420: 8, 427: 3, 432: 7, 441: 5, 450: 8, 464: 8, 477: 8, 479: 8, 495: 8, 525: 8, 545: 6, 662: 4, 773: 7, 777: 8, 780: 8, 804: 8, 806: 8, 808: 8, 829: 5, 862: 8, 884: 8, 891: 8, 927: 8, 929: 8, 1302: 8, 1416: 5, 1600: 5, 1601: 8, 1652: 8
}],
CAR.ACURA_ILX: [{
57: 3, 145: 8, 228: 5, 304: 8, 316: 8, 342: 6, 344: 8, 380: 8, 398: 3, 399: 7, 419: 8, 420: 8, 422: 8, 428: 8, 432: 7, 464: 8, 476: 4, 490: 8, 506: 8, 512: 6, 513: 6, 542: 7, 545: 4, 597: 8, 660: 8, 773: 7, 777: 8, 780: 8, 800: 8, 804: 8, 808: 8, 819: 7, 821: 5, 829: 5, 882: 2, 884: 7, 887: 8, 888: 8, 892: 8, 923: 2, 929: 4, 983: 8, 985: 3, 1024: 5, 1027: 5, 1029: 8, 1030: 5, 1034: 5, 1036: 8, 1039: 8, 1057: 5, 1064: 7, 1108: 8, 1365: 5,
}],
# Acura RDX w/ Added Comma Pedal Support (512L & 513L)
CAR.ACURA_RDX: [{
57: 3, 145: 8, 229: 4, 308: 5, 316: 8, 342: 6, 344: 8, 380: 8, 392: 6, 398: 3, 399: 6, 404: 4, 420: 8, 422: 8, 426: 8, 432: 7, 464: 8, 474: 5, 476: 4, 487: 4, 490: 8, 506: 8, 512: 6, 513: 6, 542: 7, 545: 4, 597: 8, 660: 8, 773: 7, 777: 8, 780: 8, 800: 8, 804: 8, 808: 8, 819: 7, 821: 5, 829: 5, 882: 2, 884: 7, 887: 8, 888: 8, 892: 8, 923: 2, 929: 4, 963: 8, 965: 8, 966: 8, 967: 8, 983: 8, 985: 3, 1024: 5, 1027: 5, 1029: 8, 1033: 5, 1034: 5, 1036: 8, 1039: 8, 1057: 5, 1064: 7, 1108: 8, 1365: 5, 1424: 5, 1729: 1
}],
CAR.CIVIC: [{
57: 3, 148: 8, 228: 5, 304: 8, 330: 8, 344: 8, 380: 8, 399: 7, 401: 8, 420: 8, 427: 3, 428: 8, 432: 7, 450: 8, 464: 8, 470: 2, 476: 7, 487: 4, 490: 8, 493: 5, 506: 8, 512: 6, 513: 6, 545: 6, 597: 8, 662: 4, 773: 7, 777: 8, 780: 8, 795: 8, 800: 8, 804: 8, 806: 8, 808: 8, 829: 5, 862: 8, 884: 8, 891: 8, 892: 8, 927: 8, 929: 8, 985: 3, 1024: 5, 1027: 5, 1029: 8, 1036: 8, 1039: 8, 1108: 8, 1302: 8, 1322: 5, 1361: 5, 1365: 5, 1424: 5, 1633: 8,
}],
CAR.CIVIC_BOSCH: [{
# 2017 Civic Hatchback EX and 2019 Civic Sedan Touring Canadian
57: 3, 148: 8, 228: 5, 304: 8, 330: 8, 344: 8, 380: 8, 399: 7, 401: 8, 420: 8, 427: 3, 428: 8, 432: 7, 441: 5, 450: 8, 464: 8, 470: 2, 476: 7, 477: 8, 479: 8, 490: 8, 493: 5, 495: 8, 506: 8, 545: 6, 597: 8, 662: 4, 773: 7, 777: 8, 780: 8, 795: 8, 800: 8, 804: 8, 806: 8, 808: 8, 829: 5, 862: 8, 884: 8, 891: 8, 892: 8, 927: 8, 929: 8, 985: 3, 1024: 5, 1027: 5, 1029: 8, 1036: 8, 1039: 8, 1108: 8, 1302: 8, 1322: 5, 1361: 5, 1365: 5, 1424: 5, 1600: 5, 1601: 8, 1633: 8,
}],
CAR.CRV: [{
57: 3, 145: 8, 316: 8, 340: 8, 342: 6, 344: 8, 380: 8, 398: 3, 399: 6, 401: 8, 404: 4, 420: 8, 422: 8, 426: 8, 432: 7, 464: 8, 474: 5, 476: 4, 487: 4, 490: 8, 493: 3, 506: 8, 507: 1, 512: 6, 513: 6, 542: 7, 545: 4, 597: 8, 660: 8, 661: 4, 773: 7, 777: 8, 780: 8, 800: 8, 804: 8, 808: 8, 829: 5, 882: 2, 884: 7, 888: 8, 891: 8, 892: 8, 923: 2, 929: 8, 983: 8, 985: 3, 1024: 5, 1027: 5, 1029: 8, 1033: 5, 1036: 8, 1039: 8, 1057: 5, 1064: 7, 1108: 8, 1125: 8, 1296: 8, 1365: 5, 1424: 5, 1600: 5, 1601: 8,
}],
CAR.CRV_5G: [{
57: 3, 148: 8, 199: 4, 228: 5, 231: 5, 232: 7, 304: 8, 330: 8, 340: 8, 344: 8, 380: 8, 399: 7, 401: 8, 420: 8, 423: 2, 427: 3, 428: 8, 432: 7, 441: 5, 446: 3, 450: 8, 464: 8, 467: 2, 469: 3, 470: 2, 474: 8, 476: 7, 477: 8, 479: 8, 490: 8, 493: 5, 495: 8, 507: 1, 545: 6, 597: 8, 661: 4, 662: 4, 773: 7, 777: 8, 780: 8, 795: 8, 800: 8, 804: 8, 806: 8, 808: 8, 814: 4, 815: 8, 817: 4, 825: 4, 829: 5, 862: 8, 881: 8, 882: 4, 884: 8, 888: 8, 891: 8, 927: 8, 918: 7, 929: 8, 983: 8, 985: 3, 1024: 5, 1027: 5, 1029: 8, 1036: 8, 1039: 8, 1064: 7, 1108: 8, 1092: 1, 1115: 4, 1125: 8, 1127: 2, 1296: 8, 1302: 8, 1322: 5, 1361: 5, 1365: 5, 1424: 5, 1600: 5, 1601: 8, 1618: 5, 1633: 8, 1670: 5
}],
CAR.CRV_HYBRID: [{
57: 3, 148: 8, 228: 5, 304: 8, 330: 8, 344: 8, 380: 8, 387: 8, 388: 8, 399: 7, 408: 6, 415: 6, 419: 8, 420: 8, 427: 3, 428: 8, 432: 7, 441: 5, 450: 8, 464: 8, 477: 8, 479: 8, 490: 8, 495: 8, 525: 8, 531: 8, 545: 6, 662: 4, 773: 7, 777: 8, 780: 8, 804: 8, 806: 8, 808: 8, 814: 4, 829: 5, 833: 6, 862: 8, 884: 8, 891: 8, 927: 8, 929: 8, 930: 8, 931: 8, 1302: 8, 1361: 5, 1365: 5, 1600: 5, 1601: 8, 1626: 5, 1627: 5
}],
# 2018 Odyssey w/ Added Comma Pedal Support (512L & 513L)
CAR.ODYSSEY: [{
57: 3, 148: 8, 228: 5, 229: 4, 316: 8, 342: 6, 344: 8, 380: 8, 399: 7, 411: 5, 419: 8, 420: 8, 427: 3, 432: 7, 450: 8, 463: 8, 464: 8, 476: 4, 490: 8, 506: 8, 512: 6, 513: 6, 542: 7, 545: 6, 597: 8, 662: 4, 773: 7, 777: 8, 780: 8, 795: 8, 800: 8, 804: 8, 806: 8, 808: 8, 817: 4, 819: 7, 821: 5, 825: 4, 829: 5, 837: 5, 856: 7, 862: 8, 871: 8, 881: 8, 882: 4, 884: 8, 891: 8, 892: 8, 905: 8, 923: 2, 927: 8, 929: 8, 963: 8, 965: 8, 966: 8, 967: 8, 983: 8, 985: 3, 1029: 8, 1036: 8, 1052: 8, 1064: 7, 1088: 8, 1089: 8, 1092: 1, 1108: 8, 1110: 8, 1125: 8, 1296: 8, 1302: 8, 1600: 5, 1601: 8, 1612: 5, 1613: 5, 1614: 5, 1615: 8, 1616: 5, 1619: 5, 1623: 5, 1668: 5
},
# 2018 Odyssey Elite w/ Added Comma Pedal Support (512L & 513L)
{
57: 3, 148: 8, 228: 5, 229: 4, 304: 8, 342: 6, 344: 8, 380: 8, 399: 7, 411: 5, 419: 8, 420: 8, 427: 3, 432: 7, 440: 8, 450: 8, 463: 8, 464: 8, 476: 4, 490: 8, 506: 8, 507: 1, 542: 7, 545: 6, 597: 8, 662: 4, 773: 7, 777: 8, 780: 8, 795: 8, 800: 8, 804: 8, 806: 8, 808: 8, 817: 4, 819: 7, 821: 5, 825: 4, 829: 5, 837: 5, 856: 7, 862: 8, 871: 8, 881: 8, 882: 4, 884: 8, 891: 8, 892: 8, 905: 8, 923: 2, 927: 8, 929: 8, 963: 8, 965: 8, 966: 8, 967: 8, 983: 8, 985: 3, 1029: 8, 1036: 8, 1052: 8, 1064: 7, 1088: 8, 1089: 8, 1092: 1, 1108: 8, 1110: 8, 1125: 8, 1296: 8, 1302: 8, 1600: 5, 1601: 8, 1612: 5, 1613: 5, 1614: 5, 1616: 5, 1619: 5, 1623: 5, 1668: 5
}],
CAR.ODYSSEY_CHN: [{
57: 3, 145: 8, 316: 8, 342: 6, 344: 8, 380: 8, 398: 3, 399: 7, 401: 8, 404: 4, 411: 5, 420: 8, 422: 8, 423: 2, 426: 8, 432: 7, 450: 8, 464: 8, 490: 8, 506: 8, 507: 1, 597: 8, 610: 8, 611: 8, 612: 8, 617: 8, 660: 8, 661: 4, 773: 7, 780: 8, 804: 8, 808: 8, 829: 5, 862: 8, 884: 7, 892: 8, 923: 2, 929: 8, 1030: 5, 1137: 8, 1302: 8, 1348: 5, 1361: 5, 1365: 5, 1600: 5, 1601: 8, 1639: 8
}],
# 2017 Pilot Touring AND 2016 Pilot EX-L w/ Added Comma Pedal Support (512L & 513L)
CAR.PILOT: [{
57: 3, 145: 8, 228: 5, 229: 4, 308: 5, 316: 8, 334: 8, 339: 7, 342: 6, 344: 8, 379: 8, 380: 8, 392: 6, 399: 7, 419: 8, 420: 8, 422: 8, 425: 8, 426: 8, 427: 3, 432: 7, 463: 8, 464: 8, 476: 4, 490: 8, 506: 8, 507: 1, 512: 6, 513: 6, 538: 3, 542: 7, 545: 5, 546: 3, 597: 8, 660: 8, 773: 7, 777: 8, 780: 8, 795: 8, 800: 8, 804: 8, 808: 8, 819: 7, 821: 5, 829: 5, 837: 5, 856: 7, 871: 8, 882: 2, 884: 7, 891: 8, 892: 8, 923: 2, 929: 8, 963: 8, 965: 8, 966: 8, 967: 8, 983: 8, 985: 3, 1027: 5, 1029: 8, 1036: 8, 1039: 8, 1064: 7, 1088: 8, 1089: 8, 1108: 8, 1125: 8, 1296: 8, 1424: 5, 1600: 5, 1601: 8, 1612: 5, 1613: 5, 1616: 5, 1618: 5, 1668: 5
}],
# this fingerprint also includes the Passport 2019
CAR.PILOT_2019: [{
57: 3, 145: 8, 228: 5, 308: 5, 316: 8, 334: 8, 342: 6, 344: 8, 379: 8, 380: 8, 399: 7, 411: 5, 419: 8, 420: 8, 422: 8, 425: 8, 426: 8, 427: 3, 432: 7, 463: 8, 464: 8, 476: 4, 490: 8, 506: 8, 512: 6, 513: 6, 538: 3, 542: 7, 545: 5, 546: 3, 597: 8, 660: 8, 773: 7, 777: 8, 780: 8, 795: 8, 800: 8, 804: 8, 808: 8, 817: 4, 819: 7, 821: 5, 825: 4, 829: 5, 837: 5, 856: 7, 871: 8, 881: 8, 882: 2, 884: 7, 891: 8, 892: 8, 923: 2, 927: 8, 929: 8, 983: 8, 985: 3, 1029: 8, 1052: 8, 1064: 7, 1088: 8, 1089: 8, 1092: 1, 1108: 8, 1110: 8, 1125: 8, 1296: 8, 1424: 5, 1445: 8, 1600: 5, 1601: 8, 1612: 5, 1613: 5, 1614: 5, 1615: 8, 1616: 5, 1617: 8, 1618: 5, 1623: 5, 1668: 5
},
# 2019 Pilot EX-L
{
57: 3, 145: 8, 228: 5, 229: 4, 308: 5, 316: 8, 339: 7, 342: 6, 344: 8, 380: 8, 392: 6, 399: 7, 411: 5, 419: 8, 420: 8, 422: 8, 425: 8, 426: 8, 427: 3, 432: 7, 464: 8, 476: 4, 490: 8, 506: 8, 512: 6, 513: 6, 542: 7, 545: 5, 546: 3, 597: 8, 660: 8, 773: 7, 777: 8, 780: 8, 795: 8, 800: 8, 804: 8, 808: 8, 817: 4, 819: 7, 821: 5, 829: 5, 871: 8, 881: 8, 882: 2, 884: 7, 891: 8, 892: 8, 923: 2, 927: 8, 929: 8, 963: 8, 965: 8, 966: 8, 967: 8, 983: 8, 985: 3, 1027: 5, 1029: 8, 1039: 8, 1064: 7, 1088: 8, 1089: 8, 1092: 1, 1108: 8, 1125: 8, 1296: 8, 1424: 5, 1445: 8, 1600: 5, 1601: 8, 1612: 5, 1613: 5, 1616: 5, 1617: 8, 1618: 5, 1623: 5, 1668: 5
}],
# Ridgeline w/ Added Comma Pedal Support (512L & 513L)
CAR.RIDGELINE: [{
57: 3, 145: 8, 228: 5, 229: 4, 308: 5, 316: 8, 339: 7, 342: 6, 344: 8, 380: 8, 392: 6, 399: 7, 419: 8, 420: 8, 422: 8, 425: 8, 426: 8, 427: 3, 432: 7, 464: 8, 471: 3, 476: 4, 490: 8, 506: 8, 512: 6, 513: 6, 545: 5, 546: 3, 597: 8, 660: 8, 773: 7, 777: 8, 780: 8, 795: 8, 800: 8, 804: 8, 808: 8, 819: 7, 821: 5, 829: 5, 871: 8, 882: 2, 884: 7, 892: 8, 923: 2, 927: 8, 929: 8, 963: 8, 965: 8, 966: 8, 967: 8, 983: 8, 985: 3, 1027: 5, 1029: 8, 1036: 8, 1039: 8, 1064: 7, 1088: 8, 1089: 8, 1108: 8, 1125: 8, 1296: 8, 1365: 5, 1424: 5, 1600: 5, 1601: 8, 1613: 5, 1616: 5, 1618: 5, 1668: 5, 2015: 3
},
# 2019 Ridgeline
{
    57: 3, 145: 8, 229: 4, 308: 5, 316: 8, 339: 7, 342: 6, 344: 8, 380: 8, 392: 6, 399: 7, 419: 8, 420: 8, 422: 8, 425: 8, 426: 8, 427: 3, 432: 7, 464: 8, 476: 4, 490: 8, 545: 5, 546: 3, 597: 8, 660: 8, 773: 7, 777: 8, 795: 8, 800: 8, 804: 8, 808: 8, 819: 7, 821: 5, 871: 8, 882: 2, 884: 7, 892: 8, 923: 2, 929: 8, 963: 8, 965: 8, 966: 8, 967: 8, 983: 8, 985: 3, 1027: 5, 1029: 8, 1036: 8, 1039: 8, 1064: 7, 1088: 8, 1089: 8, 1092: 1, 1108: 8, 1125: 8, 1296: 8, 1365: 5, 1424: 5, 1613: 5, 1616: 5, 1618: 5, 1623: 5, 1668: 5
}]
}
DBC = {
CAR.ACCORD: dbc_dict('honda_accord_s2t_2018_can_generated', None),
CAR.ACCORD_15: dbc_dict('honda_accord_lx15t_2018_can_generated', None),
CAR.ACCORDH: dbc_dict('honda_accord_s2t_2018_can_generated', None),
CAR.ACURA_ILX: dbc_dict('acura_ilx_2016_can_generated', 'acura_ilx_2016_nidec'),
CAR.ACURA_RDX: dbc_dict('acura_rdx_2018_can_generated', 'acura_ilx_2016_nidec'),
CAR.CIVIC: dbc_dict('honda_civic_touring_2016_can_generated', 'acura_ilx_2016_nidec'),
CAR.CIVIC_BOSCH: dbc_dict('honda_civic_hatchback_ex_2017_can_generated', None),
CAR.CRV: dbc_dict('honda_crv_touring_2016_can_generated', 'acura_ilx_2016_nidec'),
CAR.CRV_5G: dbc_dict('honda_crv_ex_2017_can_generated', None),
CAR.CRV_HYBRID: dbc_dict('honda_crv_hybrid_2019_can_generated', None),
CAR.ODYSSEY: dbc_dict('honda_odyssey_exl_2018_generated', 'acura_ilx_2016_nidec'),
CAR.ODYSSEY_CHN: dbc_dict('honda_odyssey_extreme_edition_2018_china_can', 'acura_ilx_2016_nidec'),
CAR.PILOT: dbc_dict('honda_pilot_touring_2017_can_generated', 'acura_ilx_2016_nidec'),
CAR.PILOT_2019: dbc_dict('honda_pilot_touring_2017_can_generated', 'acura_ilx_2016_nidec'),
CAR.RIDGELINE: dbc_dict('honda_ridgeline_black_edition_2017_can_generated', 'acura_ilx_2016_nidec'),
}
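# Driver steering torque above this per-car threshold is treated as a manual override.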
STEER_THRESHOLD = {
CAR.ACCORD: 1200,
CAR.ACCORD_15: 1200,
CAR.ACCORDH: 1200,
CAR.ACURA_ILX: 1200,
CAR.ACURA_RDX: 400,
CAR.CIVIC: 1200,
CAR.CIVIC_BOSCH: 1200,
CAR.CRV: 1200,
CAR.CRV_5G: 1200,
CAR.CRV_HYBRID: 1200,
CAR.ODYSSEY: 1200,
CAR.ODYSSEY_CHN: 1200,
CAR.PILOT: 1200,
CAR.PILOT_2019: 1200,
CAR.RIDGELINE: 1200,
}
SPEED_FACTOR = {
CAR.ACCORD: 1.,
CAR.ACCORD_15: 1.,
CAR.ACCORDH: 1.,
CAR.ACURA_ILX: 1.,
CAR.ACURA_RDX: 1.,
CAR.CIVIC: 1.,
CAR.CIVIC_BOSCH: 1.,
CAR.CRV: 1.025,
CAR.CRV_5G: 1.025,
CAR.CRV_HYBRID: 1.025,
CAR.ODYSSEY: 1.,
CAR.ODYSSEY_CHN: 1.,
CAR.PILOT: 1.,
CAR.PILOT_2019: 1.,
CAR.RIDGELINE: 1.,
}
# msgs sent for steering controller by camera module on can 0.
# those messages are mutually exclusive on CRV and non-CRV cars
CAMERA_MSGS = [0xe4, 0x194]
# TODO: get these from dbc file
HONDA_BOSCH = [CAR.ACCORD, CAR.ACCORD_15, CAR.ACCORDH, CAR.CIVIC_BOSCH, CAR.CRV_5G, CAR.CRV_HYBRID]
|
py | b40ba72fa7a494cffe5f901ae62024e1f25735ef | # ------------------------------------------------------------------------------
# This code is base on
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
class convolution(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(convolution, self).__init__()
pad = (k - 1) // 2
self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad), stride=(stride, stride), bias=not with_bn)
self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv = self.conv(x)
bn = self.bn(conv)
relu = self.relu(bn)
return relu
class fully_connected(nn.Module):
def __init__(self, inp_dim, out_dim, with_bn=True):
super(fully_connected, self).__init__()
self.with_bn = with_bn
self.linear = nn.Linear(inp_dim, out_dim)
if self.with_bn:
self.bn = nn.BatchNorm1d(out_dim)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
linear = self.linear(x)
bn = self.bn(linear) if self.with_bn else linear
relu = self.relu(bn)
return relu
class residual(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(residual, self).__init__()
self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1), stride=(stride, stride), bias=False)
self.bn1 = nn.BatchNorm2d(out_dim)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1), bias=False)
self.bn2 = nn.BatchNorm2d(out_dim)
self.skip = nn.Sequential(
nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False),
nn.BatchNorm2d(out_dim)
) if stride != 1 or inp_dim != out_dim else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv1 = self.conv1(x)
bn1 = self.bn1(conv1)
relu1 = self.relu1(bn1)
conv2 = self.conv2(relu1)
bn2 = self.bn2(conv2)
skip = self.skip(x)
return self.relu(bn2 + skip)
def make_layer(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = [layer(k, inp_dim, out_dim, **kwargs)]
for _ in range(1, modules):
layers.append(layer(k, out_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
def make_layer_revr(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = []
for _ in range(modules - 1):
layers.append(layer(k, inp_dim, inp_dim, **kwargs))
layers.append(layer(k, inp_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
class MergeUp(nn.Module):
def forward(self, up1, up2):
return up1 + up2
def make_merge_layer(dim):
return MergeUp()
# def make_pool_layer(dim):
# return nn.MaxPool2d(kernel_size=2, stride=2)
def make_pool_layer(dim):
return nn.Sequential()
def make_unpool_layer(dim):
return nn.Upsample(scale_factor=2)
def make_kp_layer(cnv_dim, curr_dim, out_dim):
return nn.Sequential(
convolution(3, cnv_dim, curr_dim, with_bn=False),
nn.Conv2d(curr_dim, out_dim, (1, 1))
)
def make_inter_layer(dim):
return residual(3, dim, dim)
def make_cnv_layer(inp_dim, out_dim):
return convolution(3, inp_dim, out_dim)
class kp_module(nn.Module):
def __init__(
self, n, dims, modules, layer=residual,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, **kwargs
):
super(kp_module, self).__init__()
self.n = n
curr_mod = modules[0]
next_mod = modules[1]
curr_dim = dims[0]
next_dim = dims[1]
self.up1 = make_up_layer(
3, curr_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.max1 = make_pool_layer(curr_dim)
self.low1 = make_hg_layer(
3, curr_dim, next_dim, curr_mod,
layer=layer, **kwargs
)
self.low2 = kp_module(
n - 1, dims[1:], modules[1:], layer=layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer,
**kwargs
) if self.n > 1 else \
make_low_layer(
3, next_dim, next_dim, next_mod,
layer=layer, **kwargs
)
self.low3 = make_hg_layer_revr(
3, next_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.up2 = make_unpool_layer(curr_dim)
self.merge = make_merge_layer(curr_dim)
def forward(self, x):
up1 = self.up1(x)
max1 = self.max1(x)
low1 = self.low1(max1)
low2 = self.low2(low1)
low3 = self.low3(low2)
up2 = self.up2(low3)
return self.merge(up1, up2)
class exkp(nn.Module):
def __init__(
self, n, nstack, dims, modules, heads, pre=None, cnv_dim=256,
make_tl_layer=None, make_br_layer=None,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual
):
super(exkp, self).__init__()
self.nstack = nstack
self.heads = heads
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
## keypoint heatmaps
for head in heads.keys():
if 'hm' in head:
module = nn.ModuleList([
make_heat_layer(
cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
])
self.__setattr__(head, module)
for heat in self.__getattr__(head):
heat[-1].bias.data.fill_(-2.19)
else:
module = nn.ModuleList([
make_regr_layer(
cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
])
self.__setattr__(head, module)
self.relu = nn.ReLU(inplace=True)
def forward(self, image):
# print('image shape', image.shape)
inter = self.pre(image)
outs = []
for ind in range(self.nstack):
kp_, cnv_ = self.kps[ind], self.cnvs[ind]
kp = kp_(inter)
cnv = cnv_(kp)
out = {}
for head in self.heads:
layer = self.__getattr__(head)[ind]
y = layer(cnv)
out[head] = y
outs.append(out)
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def make_hg_layer(kernel, dim0, dim1, mod, layer=convolution, **kwargs):
layers = [layer(kernel, dim0, dim1, stride=2)]
layers += [layer(kernel, dim1, dim1) for _ in range(mod - 1)]
return nn.Sequential(*layers)
class HourglassNet(exkp):
def __init__(self, heads, num_stacks=2):
n = 5
dims = [256, 256, 384, 384, 384, 512]
modules = [2, 2, 2, 2, 2, 4]
super(HourglassNet, self).__init__(
n, num_stacks, dims, modules, heads,
make_tl_layer=None,
make_br_layer=None,
make_pool_layer=make_pool_layer,
make_hg_layer=make_hg_layer,
kp_layer=residual, cnv_dim=256
)
def get_large_hourglass_net(num_layers, heads, head_conv):
model = HourglassNet(heads, 2)
return model
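# Minimal smoke-test sketch (not part of the original CornerNet/CenterNet code): builds the
# two-stack hourglass and runs one forward pass. The head names and channel counts below
# ('hm', 'wh', 'reg') are assumed CenterNet-style values; num_layers and head_conv are
# accepted by get_large_hourglass_net but not used by it.
if __name__ == '__main__':
    heads = {'hm': 80, 'wh': 2, 'reg': 2}
    net = get_large_hourglass_net(num_layers=0, heads=heads, head_conv=64)
    with torch.no_grad():
        outs = net(torch.zeros(1, 3, 512, 512))
    # one dict of head outputs per stack; feature maps are 4x downsampled (512 -> 128)
    print(len(outs), {k: tuple(v.shape) for k, v in outs[0].items()})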
|
py | b40ba9363768576b849b215668abfc3f947dcd92 | from os.path import join
import os
import ubelt as ub
import viame_wrangler
from fishnet.coco_api import CocoDataset
import glob
def _read(cfg):
annot_fpaths = glob.glob(cfg.annots)
print('annot_fpaths = {}'.format(ub.repr2(annot_fpaths)))
print('Reading raw mscoco files')
dsets = []
for fpath in sorted(annot_fpaths):
print('reading fpath = {!r}'.format(fpath))
try:
dset = CocoDataset(fpath, tag='', img_root=cfg.img_root)
assert not dset.missing_images()
except AssertionError:
hack = os.path.basename(fpath).split('-')[0].split('.')[0]
dset = CocoDataset(fpath, tag=hack, img_root=join(cfg.img_root, hack))
print(ub.repr2(dset.missing_images()))
assert not dset.missing_images(), 'missing!'
bad_annots = dset._find_bad_annotations()
if bad_annots:
print(ub.repr2(bad_annots))
            assert False, 'bad annotations'
print(ub.repr2(dset.basic_stats()))
dsets.append(dset)
print('Merging')
merged = CocoDataset.union(*dsets, autobuild=False)
# merged._remove_bad_annotations()
merged.img_root = cfg.img_root
merged._build_index()
return merged
def read_fine_merged():
cfg = viame_wrangler.config.WrangleConfig({
# 'img_root': '~/data/viame-challenge-2018/phase0-imagery',
# 'annots': '~/data/viame-challenge-2018/phase0-fine*keypoint*.json'
# 'img_root': '~/data/viame-challenge-2018/phase1-imagery',
# 'annots': '~/data/viame-challenge-2018/phase1-annotations/*/*fine*keypoint*.json',
'img_root': '/data/projects/noaa/training_data/imagery',
'annots': '/data/projects/noaa/training_data/annotations/*/*fine*-keypoint*',
})
merged = _read(cfg)
return merged
def read_coarse_merged():
cfg = viame_wrangler.config.WrangleConfig({
# 'img_root': '~/work/viame-challenge-2018/phase0-imagery',
# 'annots': '~/work/viame-challenge-2018/phase0-fine*keypoint*.json'
# 'img_root': '~/data/viame-challenge-2018/phase1-imagery',
# 'annots': '~/data/viame-challenge-2018/phase1-annotations/*/*coarse*-keypoint*',
'img_root': '/data/projects/noaa/training_data/imagery',
'annots': '/data/projects/noaa/training_data/annotations/*/*fine*-keypoint*',
})
merged = _read(cfg)
return merged
def show_keypoint_annots():
merged = read_fine_merged()
def images_with_keypoints():
keypoint_gids = set()
for aid, ann in merged.anns.items():
if ann['roi_shape'] == 'keypoints':
keypoint_gids.add(ann['image_id'])
relevant = ub.dict_subset(merged.gid_to_aids, keypoint_gids)
relevant = {gid: [a for a in aids if merged.anns[a]['roi_shape'] == 'keypoints'] for gid, aids in relevant.items()}
gid_list = ub.argsort(ub.map_vals(len, relevant))[::-1]
return gid_list
def sort_gids_by_nannots(gids):
return ub.argsort(ub.map_vals(len, ub.dict_subset(merged.gid_to_aids, gids, default=[])))[::-1]
def images_with_keypoints_and_boxes():
keypoint_gids = set()
for aid, ann in merged.anns.items():
if ann['roi_shape'] == 'keypoints':
keypoint_gids.add(ann['image_id'])
gid_list = []
for gid in keypoint_gids:
aids = merged.gid_to_aids[gid]
types = set()
for ann in ub.take(merged.anns, aids):
types.add(ann['roi_shape'])
if len(types) > 1:
gid_list.append(gid)
gid_list = sort_gids_by_nannots(gid_list)
return gid_list
def image_from_each_dataset():
groups = ub.ddict(list)
for gid, img in merged.imgs.items():
groups[os.path.dirname(img['file_name'])].append(gid)
gid_groups = []
for gids in groups.values():
gids = sort_gids_by_nannots(gids)
gid_groups.append(gids)
# round robin sample
datas = [gid for x in zip(*gid_groups) for gid in x]
return datas
# gid_list = images_with_keypoints()
gid_list = images_with_keypoints_and_boxes()
gid_list = image_from_each_dataset()
# gid = gid_list[2]
# import matplotlib.pyplot as plt
# plt.gcf().clf()
# merged.show_annotation(gid=gid)
import utool as ut
if ut.inIPython():
import IPython
IPython.get_ipython().magic('pylab qt5 --no-import-all')
from matplotlib import pyplot as plt
for gid in ut.InteractiveIter(gid_list):
try:
fig = plt.figure(1)
fig.clf()
merged.show_annotation(gid=gid)
name = os.path.basename(os.path.dirname(merged.imgs[gid]['file_name']))
ax = plt.gca()
plt.gca().set_title(name)
ax.set_xticks([])
ax.set_yticks([])
plt.gca().grid('off')
fig.canvas.draw()
except Exception:
print('cannot draw')
def nx_ascii_tree(graph, key=None):
"""
Creates an printable ascii representation of a directed tree / forest.
Args:
graph (nx.DiGraph): each node has at most one parent (
i.e. graph must be a directed forest)
key (str): if specified, uses this node attribute as a label instead of
the id
References:
https://stackoverflow.com/questions/32151776/visualize-tree-in-bash-like-the-output-of-unix-tree
Example:
>>> import networkx as nx
>>> graph = nx.dfs_tree(nx.balanced_tree(2, 2), 0)
>>> text = nx_ascii_tree(graph)
>>> print(text)
└── 0
├── 1
│ ├── 3
│ └── 4
└── 2
├── 5
└── 6
"""
import six
import networkx as nx
branch = '├─'
pipe = '│'
end = '└─'
dash = '─'
assert nx.is_forest(graph)
assert nx.is_directed(graph)
lines = []
def _draw_tree_nx(graph, node, level, last=False, sup=[]):
def update(left, i):
if i < len(left):
left[i] = ' '
return left
initial = ['{} '.format(pipe)] * level
parts = six.moves.reduce(update, sup, initial)
prefix = ''.join(parts)
if key is None:
node_label = str(node)
else:
node_label = str(graph.nodes[node]['label'])
suffix = '{} '.format(dash) + node_label
if last:
line = prefix + end + suffix
else:
line = prefix + branch + suffix
lines.append(line)
children = list(graph.succ[node])
if children:
level += 1
for node in children[:-1]:
_draw_tree_nx(graph, node, level, sup=sup)
_draw_tree_nx(graph, children[-1], level, True, [level] + sup)
def draw_tree(graph):
source_nodes = [n for n in graph.nodes if graph.in_degree[n] == 0]
if source_nodes:
level = 0
for node in source_nodes[:-1]:
_draw_tree_nx(graph, node, level, last=False, sup=[])
_draw_tree_nx(graph, source_nodes[-1], level, last=True, sup=[0])
draw_tree(graph)
text = '\n'.join(lines)
return text
def printable_heirarchy():
fine = read_fine_merged()
coarse = read_coarse_merged()
# from viame_wrangler import mappings
# COARSE CAT
import networkx as nx
g = nx.DiGraph()
for cat in coarse.cats.values():
# for cat in dset.dataset['fine_categories']:
# for cat in mappings.CoarseChallenge.heirarchy:
g.add_node(cat['name'])
if 'supercategory' in cat:
g.add_edge(cat['supercategory'], cat['name'])
for n in g.nodes:
cat = coarse.name_to_cat[n]
cid = cat['id']
n_examples = len(coarse.cid_to_aids[cid])
g.node[n]['label'] = '"{}":{}'.format(n, n_examples)
print(nx_ascii_tree(g, 'label'))
# FINE CAT
# dset = merged
import networkx as nx
g = nx.DiGraph()
for cat in fine.cats.values():
# for cat in dset.dataset['fine_categories']:
# for cat in mappings.FineGrainedChallenge.heirarchy:
g.add_node(cat['name'])
if 'supercategory' in cat:
g.add_edge(cat['supercategory'], cat['name'])
for n in g.nodes:
try:
cat = fine.name_to_cat[n]
cid = cat['id']
n_examples = len(fine.cid_to_aids[cid])
except Exception:
n_examples = 0
g.node[n]['label'] = '"{}":{}'.format(n, n_examples)
print(nx_ascii_tree(g, 'label'))
|
py | b40baac4aff300c45631b4a628a9433e88cef707 | """
Context
-------
The approach of building a consensus/majority graph of enzymes/EC numbers to find a core metabolism shared among several organisms has to be validated against previous research.
One such previous study deals with E. coli, but uses a different approach, asking not what the core metabolism 'can do', but what it 'always does'.
Almaas et al. (2005) list core reactions calculated via flux analysis in table S1 (https://doi.org/10.1371/journal.pcbi.0010068.st001), some of which have annotated EC numbers.
These EC numbers are used to validate the approach of this library. Multifunctional enzymes and EC numbers containing wildcards (e.g. 1.2.-.-) are excluded on both sides, to minimise statistical skew.
This leaves 62 EC numbers in Almaas' approach.
Question
--------
Does the consensus/majority graph approach to core metabolism yield a similar set of EC numbers as the approach of Almaas et al. (2005)?
Method
------
- extract EC numbers from Almaas et al. (2005) by hand
- get group of organisms 'Escherichia coli'
- REPEAT for varying majority-percentages:
- calculate EC numbers occuring in group's core metabolism
- overlap Almaas' set with ours and print amount of EC numbers inside the intersection and falling off either side
Result
------
::
Maj. % others both ours
100%: 28 34 381
90%: 19 43 491
80%: 19 43 499
70%: 19 43 510
60%: 19 43 518
50%: 19 43 522
40%: 19 43 531
30%: 19 43 542
20%: 19 43 550
10%: 19 43 564
1%: 19 43 602
Conclusion
----------
With a 90% majority and below, the number of overlapping ECs does not increase any more. This indicates that, at least for E. coli, a 90% majority is enough
to create a stable core metabolism, diminishing the skew exerted by unusually specialised organisms.
In the case of E. coli these could be soil-based E. coli strains, which remains to be researched.
About 69% of the ECs in the reaction-based core metabolism, as postulated by Almaas et al., are also included in the majority-based core metabolism of
our approach. Due to some ECs missing in Almaas' table S1, this percentage could have been even bigger.
This substantial overlap shows most essential reactions are also covered by a majority approach.
However, this goes along with two interesting observations:
1) 31% of essential reactions are not included in any majority, not even in a single organism from KEGG at 1% majority (effectively n=1).
This could be because of a flaw in either approach, or because the data Almaas et al. use stems from the year 2000 (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC25862/)
and it might be that Escherichia coli MG1655 was annotated with different ECs at that time than in today's KEGG database. This has to be investigated.
2) Only 8% of majority ECs (at 90%) are essential reactions. This indicates that while E. coli organisms share many ECs, most of them are only active at
certain times.
"""
from FEV_KEGG.Evolution.Taxonomy import NCBI
import FEV_KEGG.KEGG.Organism as Organism
from FEV_KEGG.Graph.Elements import EcNumber
if __name__ == '__main__':
output = ['Maj. %\tothers\tboth\tours']
#- extract EC numbers from Almaas et al. (2005) by hand
theirECnumberStrings = ['1.1.1.158', '1.1.1.193', '1.1.1.25', '1.5.1.3', '1.6.4.5', '1.7.99.5', '2.2.1.2', '2.3.1.129', '2.3.1.39', '2.4.1.182', '2.4.1.21', '2.5.1.15', '2.5.1.19', '2.5.1.7', '2.5.1.9', '2.6.1.16', '2.7.1.107', '2.7.1.130', '2.7.1.23', '2.7.1.24', '2.7.1.26', '2.7.1.33', '2.7.2.3', '2.7.4.6', '2.7.4.8', '2.7.4.9', '2.7.6.3', '2.7.7.18', '2.7.7.2', '2.7.7.23', '2.7.7.27', '2.7.7.3', '2.7.7.38', '2.7.7.41', '2.7.8.5', '2.7.8.8', '3.1.3.45', '3.5.4.16', '3.5.4.25', '3.5.4.26', '3.6.1.1', '3.6.1.34', '3.6.1.45', '4.1.1.36', '4.1.1.65', '4.1.2.13', '4.1.2.16', '4.1.2.25', '4.2.1.10', '4.2.1.11', '4.6.1.3', '4.6.1.4', '5.1.1.3', '5.3.1.1', '5.3.1.13', '6.3.2.12', '6.3.2.13', '6.3.2.15', '6.3.2.4', '6.3.2.5', '6.3.2.8', '6.3.2.9']
theirECnumbers = set()
for string in theirECnumberStrings:
theirECnumbers.add( EcNumber(string) )
#- get group of organisms 'Escherichia coli'
taxonomy = NCBI.getTaxonomy()
group = Organism.Group( taxonomy.getOrganismAbbreviationsByPath('Escherichia coli', oneOrganismPerSpecies=False) )
#- REPEAT for varying majority-percentages:
for percentage in [100, 90, 80, 70, 60, 50, 40, 30, 20, 10 , 1]:
        #- calculate EC numbers occurring in group's core metabolism
ourECnumbersWithWildcard = group.majorityEcGraph(majorityPercentage = percentage, noMultifunctional = True).getECs()
ourECnumbers = EcNumber.removeWildcards(ourECnumbersWithWildcard)
#- overlap Almaas' set with ours and print amount of EC numbers inside the intersection and falling off either side
onlyInTheirs = theirECnumbers.difference( ourECnumbers )
inBoth = theirECnumbers.intersection( ourECnumbers )
onlyInOurs = ourECnumbers.difference( theirECnumbers )
output.append(str(percentage) + '%:\t' + str(len(onlyInTheirs)) + '\t' + str(len(inBoth)) + '\t' + str(len(onlyInOurs)) )
for line in output:
print(line) |
py | b40bacfa17a7e80e35f741385c4cc0c4e0884031 | from . import events
@events.emits('change')
class DelayedUpdater(events.EventMixin):
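    """Mixin that coalesces change notifications: handlers only flag this object or its
    children as dirty and emit at most one 'change' event until the pending update runs;
    the real work happens when update_if_needed() calls _update() / _update_children()."""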
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dirty_self = False
self.__dirty_children = False
def update_if_needed(self):
if self.__dirty_self:
self._update()
self._mark_clean()
if self.__dirty_children:
self._update_children()
self._mark_children_clean()
@property
def is_dirty(self):
return self.__dirty_self or self.__dirty_children
@property
def is_clean(self):
return not self.__dirty_self and not self.__dirty_children
def _child_change_handler(self, evt):
if self.__dirty_self or self.__dirty_children:
return
self.__dirty_children = True
self.emit('change', {})
def _change_handler(self, evt):
if self.__dirty_self:
return
self.__dirty_self = True
if not self.__dirty_children:
self.emit('change', {})
def _mark_dirty(self):
self.__dirty_self = True
def _mark_clean(self):
self.__dirty_self = False
def _mark_children_dirty(self):
self.__dirty_children = True
def _mark_children_clean(self):
self.__dirty_children = False
def _update(self):
pass
def _update_children(self):
children = getattr(self, '_children', [])
for ch in children:
ch.update_if_needed()
|
py | b40bad0e1526cf4edf585ba15534bedaca4bee41 | import mock
import unittest
from simmetrica import Simmetrica
class TestSimmetrica(unittest.TestCase):
def test_push(self):
with mock.patch('simmetrica.simmetrica.StrictRedis') as StrictRedis:
simmetrica = Simmetrica()
hincrby = StrictRedis.return_value.pipeline.return_value.hincrby
simmetrica.push('foo')
self.assertTrue(hincrby.called)
def test_get_timestamps_for_query(self):
simmetrica = Simmetrica()
timestamps = simmetrica.get_timestamps_for_query(1363707480, 1363707780, 60)
expected = [1363707480, 1363707540, 1363707600, 1363707660, 1363707720]
self.assertEqual(list(timestamps), expected)
def test_get_timestamps_for_push(self):
simmetrica = Simmetrica()
timestamps = list(simmetrica.get_timestamps_for_push(1363707716))
self.assertEqual(sorted(timestamps), [
('15min', 1363707000),
('5min', 1363707600),
('day', 1363651200),
('hour', 1363705200),
('min', 1363707660),
('month', 1363392000),
('week', 1363219200),
('year', 1356048000),
])
def test_round_time(self):
simmetrica = Simmetrica()
rounded_time = simmetrica.round_time(1363599249, 3600)
self.assertEqual(rounded_time, 1363597200)
def test_get_event_key(self):
simmetrica = Simmetrica()
key = simmetrica.get_event_key('foo', '5min')
self.assertEqual('simmetrica:foo:5min', key)
if __name__ == '__main__':
unittest.main()
|
py | b40baed9700793f686706a4ec04a28aa833ff0e5 | import unittest
from app.models import Headlines, Sources
class HeadlinesTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Headlines class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
        self.new_headline = Headlines('Victor Kibocha', 'Kyrsten Sinema Declared War Winner', 'Ms. Sinema scored a ground breaking victory', 'www.news.com', 'www.sinema123.com', '2018-9-12-13T00:50:04Z', 'If one of the biggest themes...')
def test_instance(self):
        self.assertTrue(isinstance(self.new_headline, Headlines))
class SourcesTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Sources class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
        self.new_source = Sources('abc-news', 'ABC News', 'Your trusted source for breaking news', 'www.abcnews.com', 'general', 'en', 'us')
def test_instance(self):
        self.assertTrue(isinstance(self.new_source, Sources))
|
py | b40bafba45323468ab1b9f320dace3420d3eb09f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0002_envvariable'),
]
operations = [
migrations.AlterField(
model_name='controller',
name='marathon_cmd',
field=models.TextField(default=b''),
),
]
|
py | b40bb024e55445b9ff60eff38c32fffe929a5b8e | """
Django settings for example project.
Generated by Cookiecutter Django Package
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "9#k)q8^7*@8$zr@r8-^1u*c!k^jt&7jp_^5=vwzf!n(-2)xlie"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_honeycomb',
# if your app has other dependencies that need to be added to the site
# they should be added here
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
py | b40bb0346b48e6c18cd76dbabdefd1eddb95a62a | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .faster_tokenizer import *
from .model_utils import *
from .ernie_model import *
|
py | b40bb04405c84d30fcc5d01b2f1744b9d4fe15a3 | import asyncio
import discord
from discord.ext.buttons import Paginator
class Pag(Paginator):
async def teardown(self):
try:
await self.page.clear_reactions()
except discord.HTTPException:
pass
def clean_code(content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith("```") and content.endswith("```"):
return "\n".join(content.split("\n")[1:])[:-3]
else:
return content
async def get_message(
bot, ctx, content_one="Default Message", content_two="\uFEFF", timeout=100
):
"""
This function sends an embed containing the params and then waits for a message to return
"""
embed = discord.Embed(
title=f"{content_one}",
description=f"{content_two}",
)
sent = await ctx.send(embed=embed)
try:
msg = await bot.wait_for(
"message",
timeout=timeout,
check=lambda message: message.author == ctx.author
and message.channel == ctx.channel,
)
if msg:
return msg.content
except asyncio.TimeoutError:
return False
async def review_embed(bot, ctx, embed) -> bool:
"""Given an embed, send it and wait for a review"""
m = await ctx.send("Preview:\nYes | No", embed=embed, delete_after=35)
await m.add_reaction("👍")
await m.add_reaction("👎")
def check(reaction, user):
return user.id == ctx.author.id and str(reaction.emoji) in ["👍", "👎"]
try:
reaction, user = await bot.wait_for("reaction_add", timeout=30, check=check)
except asyncio.TimeoutError:
return False
else:
if str(reaction.emoji) == "👍":
return True
return False |
py | b40bb121ce3565026d04aa669359fbc04768539f | from Jumpscale import j
try:
from intercom.client import Client
except:
j.builders.runtimes.python3.pip_package_install("python-intercom")
from intercom.client import Client
from intercom.errors import HttpError
import intercom
intercom.HttpError = HttpError
intercom.__version__ = "3.1.0"
JSConfigClient = j.baseclasses.object_config
class IntercomClient(JSConfigClient):
_SCHEMATEXT = """
@url = jumpscale.intercom.client
name** = "" (S)
token_ = "dG9rOmNjNTRlZDFiX2E3OTZfNGFiM185Mjk5X2YzMGQyN2NjODM4ZToxOjA=" (S)
"""
def _init(self, **kwargs):
self.token = self.token_
self.api = Client(personal_access_token=self.token)
def send_in_app_message(self, body, admin_id, user_id):
"""
sending an in-app message from an admin to user
:param body: body of the email
:type body: str
:param admin_id: id of sender admin
:type admin_id: str
:param user_id: id of user who will receive the message
:type user_id: str
"""
self.api.messages.create(
**{
"message_type": "inapp",
"body": body,
"from": {"type": "admin", "id": admin_id},
"to": {"type": "user", "id": user_id},
}
)
def send_mail_message(self, subject, body, template, admin_id, user_id):
"""
sending a mail message from an admin to user
        :param subject: subject of the email
        :type subject: str
        :param body: body of the email
        :type body: str
        :param template: has one of the 2 values "plain", or "personal"
        :type template: str
        :param admin_id: id of sender admin
        :type admin_id: str
        :param user_id: id of user who will receive the message
        :type user_id: str
"""
self.api.messages.create(
**{
"message_type": "email",
"subject": subject,
"body": body,
"template": template,
"from": {"type": "admin", "id": admin_id},
"to": {"type": "user", "id": user_id},
}
)
def get_user(self, email):
user = self.api.users.find(email=email)
return user
def get_all_users(self):
users = self.api.users.all()
return users
def delete_user(self, email):
user = self.get_user(email=email)
self.api.users.delete(user)
def get_all_admins(self):
admins = self.api.admins.all()
return admins
def get_admin(self, name):
admins = self.api.admins.all()
for admin in admins:
if admin.name == name:
return admin
return None
|
py | b40bb18d9fb453ddff82c354aa7e848152293e95 | # Generated by Django 2.1.15 on 2021-06-03 06:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
py | b40bb1c5e1555c7d9540d52b49ddbb1745fba51d | import math
import torch
from torch.nn import init
from torch.nn.parameter import Parameter
from .posteriors import PosteriorNormal
from .priors import PriorLaplace, PriorNormal, prior_builder
from .variational import BBBModule
class LinearPathwise(BBBModule):
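    """Variational linear layer for Bayes-by-Backprop training: weights (and, optionally,
    biases) are sampled from a learned Gaussian posterior via the reparameterisation
    ("pathwise") trick on every forward pass, and kl_loss() returns a single-sample
    Monte Carlo estimate of the KL divergence between posterior and prior."""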
__constants__ = ['bias']
def __init__(self, in_features, out_features, bias=True, prior_type="normal"):
super(LinearPathwise, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight_posterior = PosteriorNormal(
self, "weight",
Parameter(torch.Tensor(out_features, in_features)),
Parameter(torch.Tensor(out_features, in_features))
)
self.use_bias = bias
if self.use_bias:
self.bias_posterior = PosteriorNormal(
self, "bias",
Parameter(torch.Tensor(out_features)),
Parameter(torch.Tensor(out_features))
)
else:
self.register_parameter('bias', None)
self.prior = prior_builder(prior_type, self)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.out_features)
init.normal_(self.weight_loc, mean=0.0, std=stdv)
init.normal_(self.weight_scale, mean=-7.0, std=stdv)
if self.use_bias:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight_loc)
bound = 1 / math.sqrt(fan_in)
init.normal_(self.bias_loc, mean=0.0, std=bound)
init.normal_(self.bias_scale, mean=-7.0, std=bound)
def kl_loss(self):
total_loss = (self.weight_posterior.log_prob(self.weight_sample) -
self.prior.log_prob(self.weight_sample)).sum()
if self.use_bias:
total_loss += (self.bias_posterior.log_prob(self.bias_sample) -
self.prior.log_prob(self.bias_sample)).sum()
return total_loss
def forward(self, input):
self.weight_sample = self.weight_posterior.rsample()
output = torch.matmul(input, self.weight_sample.t())
if self.use_bias:
self.bias_sample = self.bias_posterior.rsample()
output += self.bias_sample
return output
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.use_bias
)
|
py | b40bb2097648c6214c2bc10ca022bdeff082594a | from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
from django.forms import ModelForm
from django import forms
from user.models import Advisor, President, Administrator, Person
from django.contrib.auth.models import User
from django.forms.widgets import TextInput
class AdvisorForm(ModelForm):
name = forms.CharField(label='Nome', max_length=50)
cpf = forms.CharField(label='CPF', max_length=12)
municipio = forms.CharField(label='Município', max_length=30)
bairro = forms.CharField(label='Bairro', max_length=30)
uf = forms.CharField(label='UF', max_length=2)
cep = forms.CharField(label='CEP', max_length=10)
class Meta:
model = Advisor
exclude = ['user', 'nome_cae', 'tipo_cae']
def __init__(self, *args, **kwargs):
super(AdvisorForm, self).__init__(*args, **kwargs)
self.fields['cep'].widget = TextInput(attrs={
'id': 'cep',
'class': 'cep',
'name': 'cep',
'placeholder': '',
'onblur': 'pesquisacep(this.value)'
})
self.fields['bairro'].widget = TextInput(attrs={
'id': 'bairro',
'class': 'bairro',
'name': 'bairro',
'placeholder': '',
})
self.fields['municipio'].widget = TextInput(attrs={
'id': 'municipio',
'class': 'municipio',
'name': 'municipio',
'placeholder': '',
})
self.fields['uf'].widget = TextInput(attrs={
'id': 'uf',
'class': 'uf',
'name': 'uf',
'placeholder': '',
})
class PresidentForm(ModelForm):
name = forms.CharField(label="Nome", max_length=50)
email = forms.EmailField(label="Email", max_length=100)
username = forms.CharField(label="Usuário", max_length=50)
password = forms.CharField(
label="Senha",
widget=forms.PasswordInput(),
max_length=32
)
class Meta:
model = President
exclude = [
'user',
'cpf',
'cep',
'bairro',
'municipio',
'uf',
'tipo_cae',
'nome_cae'
]
def save(self, commit=True):
president = super(PresidentForm, self).save(commit=False)
president.name = self.cleaned_data['name']
email = self.cleaned_data['email']
president.email = email
username = self.cleaned_data['username']
password = self.cleaned_data['password']
if User.objects.filter(username=username).exists():
self.add_error(
'username',
'Este nome de usuário já está cadastrado!'
)
elif Person.objects.filter(email=email).exists():
self.add_error('email', 'Este email já está cadastrado!')
else:
user = User.objects.create_user(username=username,
password=password,
email=email)
president.user = user
content_type = ContentType.objects.get_for_model(President)
president_perm = Permission.objects.get(codename='president',
content_type=content_type)
user.user_permissions.add(president_perm)
if commit:
president.save()
return president
class AdministratorForm(ModelForm):
name = forms.CharField(label="Nome", max_length=50)
email = forms.EmailField(label="Email", max_length=100)
username = forms.CharField(label="Usuário", max_length=50)
password = forms.CharField(
label="Senha",
widget=forms.PasswordInput(),
max_length=32
)
class Meta:
model = Administrator
exclude = ["user"]
def save(self, commit=True):
admin = super(AdministratorForm, self).save(commit=False)
admin.name = self.cleaned_data['name']
email = self.cleaned_data['email']
admin.email = email
username = self.cleaned_data['username']
password = self.cleaned_data['password']
if User.objects.filter(username=username).exists():
self.add_error(
'username',
'Este nome de usuário já está cadastrado!'
)
elif Person.objects.filter(email=email).exists():
self.add_error('email', 'Este email já está cadastrado!')
else:
user = User.objects.create_user(username=username,
password=password,
is_superuser=True)
admin.user = user
content_type = ContentType.objects.get_for_model(Administrator)
admin_perm = Permission.objects.get(codename='administrator',
content_type=content_type)
user.user_permissions.add(admin_perm)
if commit:
admin.save()
return admin
class ConfirmUserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ConfirmUserForm, self).__init__(*args, **kwargs)
self.fields['username'].help_text = None
self.fields['username'].widget.attrs['readonly'] = True
self.fields['is_active'].help_text = None
self.fields['email'].help_text = None
self.fields['email'].widget.attrs['readonly'] = True
class Meta:
model = User
fields = ('username', 'email', 'is_active',)
def save(self, commit=True):
        # save(commit=False) returns the User instance so it can be persisted below.
        user = super(ConfirmUserForm, self).save(commit=False)
if commit:
user.save()
return user
|
py | b40bb25b80bc8c4ff83fbee5dd65b013c7c63eb1 | """Test pbtranscript.Classifier."""
import unittest
import filecmp
import os.path as op
from pbcommand.pb_io.report import load_report_from_json
from pbcore.util.Process import backticks
from pbtranscript.io.Summary import ClassifySummary, ClusterSummary
def rm_version_string(infn, outfn):
cmd = "cat %s |grep -v '_changelist' | grep -v '_version' > %s" % (infn, outfn)
_o, _c, _e = backticks(cmd)
if _c != 0:
raise RuntimeError("Failed to run %s" % cmd)
def _compare_reports(self, rpt_json1, rpt_json2):
rpt1 = load_report_from_json(rpt_json1)
rpt2 = load_report_from_json(rpt_json2)
attr1 = {a.id:a.value for a in rpt1.attributes}
attr2 = {a.id:a.value for a in rpt2.attributes}
self.assertEqual(attr1, attr2)
class Test_ClassifySummary(unittest.TestCase):
"""Test ClassifySummary."""
def setUp(self):
"""Set up test data."""
self.rootDir = op.dirname(op.dirname(op.abspath(__file__)))
self.testDir = op.join(self.rootDir, "")
def test_write(self):
"""Test ClassifySummary.write."""
obj = ClassifySummary()
obj.num_reads = 100
obj.num_5_seen = 90
obj.num_3_seen = 70
obj.num_polya_seen = 70
obj.num_filtered_short_reads = 10
obj.num_nfl = 50
obj.num_fl = 40
obj.num_flnc = 39
obj.num_flc = 1
obj.num_flnc_bases = 10001
outFN = op.join(self.testDir, "out/test_ClassifySummary.txt")
stdoutFN = op.join(self.testDir, "stdout/test_ClassifySummary.txt")
obj.write(outFN)
self.assertTrue(filecmp.cmp(outFN, stdoutFN))
outFN = op.join(self.testDir, "out/test_ClassifySummary.json")
stdoutFN = op.join(self.testDir, "stdout/test_ClassifySummary.json")
obj.write(outFN)
rm_version_string(outFN, outFN + "tmp1")
rm_version_string(stdoutFN, outFN + "tmp2")
_compare_reports(self, outFN, stdoutFN)
#self.assertTrue(filecmp.cmp(outFN + "tmp1", outFN + "tmp2"))
class Test_ClusterSummary(unittest.TestCase):
"""Test ClusterSummary."""
def setUp(self):
"""Set up test data"""
self.testDir = op.dirname(op.dirname(op.abspath(__file__)))
def test_write(self):
"""Test ClusterSummary.write."""
obj = ClusterSummary()
obj.num_consensus_isoforms = 97
obj.num_total_bases = 97 * 3945
outFN = op.join(self.testDir, "out/test_ClusterSummary.txt")
stdoutFN = op.join(self.testDir, "stdout/test_ClusterSummary.txt")
obj.write(outFN)
self.assertTrue(filecmp.cmp(outFN, stdoutFN))
outFN = op.join(self.testDir, "out/test_ClusterSummary.json")
stdoutFN = op.join(self.testDir, "stdout/test_ClusterSummary.json")
obj.write(outFN)
rm_version_string(outFN, outFN + "tmp1")
rm_version_string(stdoutFN, outFN + "tmp2")
_compare_reports(self, outFN, stdoutFN)
#self.assertTrue(filecmp.cmp(outFN + "tmp1", outFN + "tmp2"))
|
py | b40bb2b725cc93c5ec31df4ceca627c1a9092ad2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RunbookCreateOrUpdateDraftParameters(Model):
"""The parameters supplied to the create or update runbook operation.
All required parameters must be populated in order to send to Azure.
:param runbook_content: Required. Content of the Runbook.
:type runbook_content: str
"""
_validation = {
'runbook_content': {'required': True},
}
_attribute_map = {
'runbook_content': {'key': 'runbookContent', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RunbookCreateOrUpdateDraftParameters, self).__init__(**kwargs)
self.runbook_content = kwargs.get('runbook_content', None)
|
py | b40bb3383d246c9ba9a3ed6a99551a0f7c56c534 | #/*
# IBM Confidential
# OCO Source Materials
# 5737-I23
# Copyright IBM Corp. 2021
# The source code for this program is not published or otherwise divested of its trade secrets, irrespective of what has been deposited with the U.S Copyright Office.
# */
print AdminApp.edit('content-services-graphql', '[-MapRolesToUsers [["AllAuthenticated" "no" "yes" "" "" "no" "" "" ]]]')
AdminConfig.save()
|
py | b40bb3624fd27b1b80c4158bae834dff3370257a | import pytest
from peewee import JOIN
from playhouse.test_utils import assert_query_count
from data.database import Repository, RepositoryPermission, TeamMember, Namespace
from data.model._basequery import filter_to_repos_for_user
from data.model.organization import get_admin_users
from data.model.user import get_namespace_user
from util.names import parse_robot_username
from test.fixtures import *
def _is_team_member(team, user):
return user.id in [
member.user_id for member in TeamMember.select().where(TeamMember.team == team)
]
def _get_visible_repositories_for_user(
user, repo_kind="image", include_public=False, namespace=None
):
"""
Returns all repositories directly visible to the given user, by either repo permission, or the
user being the admin of a namespace.
"""
for repo in Repository.select():
if repo_kind is not None and repo.kind.name != repo_kind:
continue
if namespace is not None and repo.namespace_user.username != namespace:
continue
if include_public and repo.visibility.name == "public":
yield repo
continue
# Direct repo permission.
try:
RepositoryPermission.get(repository=repo, user=user).get()
yield repo
continue
except RepositoryPermission.DoesNotExist:
pass
# Team permission.
found_in_team = False
for perm in RepositoryPermission.select().where(RepositoryPermission.repository == repo):
if perm.team and _is_team_member(perm.team, user):
found_in_team = True
break
if found_in_team:
yield repo
continue
# Org namespace admin permission.
if user in get_admin_users(repo.namespace_user):
yield repo
continue
@pytest.mark.parametrize(
"username",
[
"devtable",
"devtable+dtrobot",
"public",
"reader",
],
)
@pytest.mark.parametrize("include_public", [True, False])
@pytest.mark.parametrize("filter_to_namespace", [True, False])
@pytest.mark.parametrize(
"repo_kind",
[
None,
"image",
"application",
],
)
def test_filter_repositories(
username, include_public, filter_to_namespace, repo_kind, initialized_db
):
namespace = username if filter_to_namespace else None
if "+" in username and filter_to_namespace:
namespace, _ = parse_robot_username(username)
user = get_namespace_user(username)
query = (
Repository.select()
.distinct()
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(Repository)
.join(RepositoryPermission, JOIN.LEFT_OUTER)
)
# Prime the cache.
Repository.kind.get_id("image")
with assert_query_count(1):
found = list(
filter_to_repos_for_user(
query,
user.id,
namespace=namespace,
include_public=include_public,
repo_kind=repo_kind,
)
)
expected = list(
_get_visible_repositories_for_user(
user, repo_kind=repo_kind, namespace=namespace, include_public=include_public
)
)
assert len(found) == len(expected)
assert {r.id for r in found} == {r.id for r in expected}
|
py | b40bb4f45d2f555168039ce677323274113db946 | #
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
# building fsbl image specific configurations
component = "zynqmp_fsbl" # FSBL build is needed to run any zynqmp application on board
|
py | b40bb5263d8387b930b4df1b72004b068269978a | # from argparse import Namespace
# from contextlib import ExitStack
# import dask
# import dask.array as da
# from daskms.constants import DASKMS_PARTITION_KEY as PARTITION_KEY
# from distributed import Client, LocalCluster
# import numpy as np
# import pytest
# import xarray as xr
# from africanus.rime.dask import phase_delay
# from quartical.data_handling.ms_handler import read_xds_list
# from quartical.scheduling import install_plugin, annotate, dataset_partition
# def test_array_annotation():
# A = da.ones((10, 10), chunks=(3, 4), dtype=np.complex64)
# assert A.__dask_graph__().layers[A.name].annotations is None
# annotate(A)
# expected = {"__dask_array__": {
# "chunks": ((3, 3, 3, 1), (4, 4, 2)),
# "dtype": "complex64"}
# }
# assert A.__dask_graph__().layers[A.name].annotations == expected
# def test_xarray_datarray_annotation():
# A = da.ones((10, 10), chunks=(3, 4), dtype=np.complex64)
# xA = xr.DataArray(A, dims=("x", "y"))
# assert A.__dask_graph__().layers[A.name].annotations is None
# annotate(xA)
# expected = {"__dask_array__": {
# "dims": ("x", "y"),
# "chunks": ((3, 3, 3, 1), (4, 4, 2)),
# "dtype": "complex64"}
# }
# assert A.__dask_graph__().layers[A.name].annotations == expected
# def test_xarray_dataset_annotation():
# A = da.ones((10, 10), chunks=(3, 4), dtype=np.complex64)
# B = da.ones((10,), chunks=3, dtype=np.float32)
# partition_schema = (("SCAN", "int32"), ("DDID", "int32"))
# ds = xr.Dataset({"A": (("x", "y"), A), "B": (("x",), B)},
# attrs={
# # Schema must be present to extract
# # partition values
# PARTITION_KEY: partition_schema,
# "SCAN": 1,
# "DDID": 2
# })
# assert A.__dask_graph__().layers[A.name].annotations is None
# assert B.__dask_graph__().layers[B.name].annotations is None
# annotate(ds)
# expected = {"__dask_array__": {
# "dims": ("x", "y"),
# "chunks": ((3, 3, 3, 1), (4, 4, 2)),
# "partition": (("SCAN", 1), ("DDID", 2)),
# "dtype": "complex64"}
# }
# assert A.__dask_graph__().layers[A.name].annotations == expected
# expected = {"__dask_array__": {
# "dims": ("x",),
# "chunks": ((3, 3, 3, 1),),
# "partition": (("SCAN", 1), ("DDID", 2)),
# "dtype": "float32"}
# }
# assert B.__dask_graph__().layers[B.name].annotations == expected
# def test_distributed(base_opts):
# opts = Namespace(**vars(base_opts))
# with ExitStack() as stack:
# cluster = \
# stack.enter_context(LocalCluster(processes=False, n_workers=4))
# client = stack.enter_context(Client(cluster))
# scheduler = cluster.scheduler
# opts.input_ms.time_chunk = 4
# opts.parallel = Namespace(scheduler='distributed',
# address=scheduler.address)
# opts._model_columns = ["MODEL_DATA"]
# datasets, _ = read_xds_list(opts)
# assert len(datasets) == 2
# assert len(datasets[0].chunks["row"]) == 29
# assert len(datasets[1].chunks["row"]) == 27
# client.run_on_scheduler(install_plugin)
# assert len(scheduler.plugins) > 1
# chan_freq = da.linspace(.856e9, 2*.856e9, 16, chunks=4)
# lm = da.random.random((4, 2), chunks=(2, 2))
# new_datasets = []
# for ds in datasets:
# K = phase_delay(lm, ds.UVW.data, chan_freq)
# partition = dataset_partition(ds)
# annotate(K, dims=("source", "row", "chan"), partition=partition)
# vis = K.sum(axis=0)[:, :, None]
# nds = xr.Dataset({"DATA": (("row", "chan", "corr"), vis)})
# nds.attrs.update(ds.attrs)
# new_datasets.append(nds)
# annotate(new_datasets)
# with dask.config.set(optimization__fuse__active=False):
# dask.compute(new_datasets)
|
py | b40bb59e2b30b13a644e0a87e93e15e0a3dd4200 | #!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("CHANGELOG.rst") as history_file:
history = history_file.read()
requirements = [
"pandas",
"treelib",
"requests",
]
setup_requirements = [
"pytest-runner",
]
test_requirements = [
"pytest>=3",
]
setup(
author="Mika Pflüger",
author_email="[email protected]",
python_requires=">=3.5",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="Python wrapper around the Flexible Query API of the UNFCCC.",
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="unfccc_di_api",
name="unfccc_di_api",
packages=find_packages(include=["unfccc_di_api", "unfccc_di_api.*"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/pik-primap/unfccc_di_api",
version="1.0.0",
zip_safe=False,
)
|
py | b40bb6675e9594c3346290967dff9deac1e6e031 | #! /usr/bin/env python
import sys
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
# this will be the name of the plot file
fn = sys.argv[1]
# Get name of the test
test_name = fn[:-9] # Could also be os.path.split(os.getcwd())[1]
# Run checksum regression test
checksumAPI.evaluate_checksum(test_name, fn)
|
py | b40bb6c20841483ac18b871600886abc4c01140c | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure_devtools.perfstress_tests import PerfStressTest
from azure.identity import EnvironmentCredential
from azure.identity.aio import EnvironmentCredential as AsyncEnvironmentCredential
from azure.keyvault.secrets import SecretClient
from azure.keyvault.secrets.aio import SecretClient as AsyncSecretClient
class GetSecretTest(PerfStressTest):
def __init__(self, arguments):
super().__init__(arguments)
# Auth configuration
self.credential = EnvironmentCredential()
self.async_credential = AsyncEnvironmentCredential()
# Create clients
vault_url = self.get_from_env("AZURE_KEYVAULT_URL")
self.client = SecretClient(vault_url, self.credential)
self.async_client = AsyncSecretClient(vault_url, self.async_credential)
async def global_setup(self):
"""The global setup is run only once."""
await super().global_setup()
await self.async_client.set_secret("livekvtestperfsecret", "secret-value")
async def global_cleanup(self):
"""The global cleanup is run only once."""
await self.async_client.delete_secret("livekvtestperfsecret")
await self.async_client.purge_deleted_secret("livekvtestperfsecret")
await super().global_cleanup()
async def close(self):
"""This is run after cleanup."""
await self.async_client.close()
await self.async_credential.close()
await super().close()
def run_sync(self):
"""The synchronous perf test."""
self.client.get_secret("livekvtestperfsecret")
async def run_async(self):
"""The asynchronous perf test."""
await self.async_client.get_secret("livekvtestperfsecret")
|
py | b40bb74f7993fda1dd802c692002947f60e3619b | # Generated by Django 3.0.3 on 2020-02-11 16:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('arts_core', '0005_auto_20200210_1706'),
]
operations = [
migrations.AlterField(
model_name='module',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arts_core.Runtime'),
),
]
|
py | b40bb7718bfb059ebfe8175208a83474317fcfd4 | """
WSGI config for django_project_Vlasov project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project_Vlasov.settings')
application = get_wsgi_application()
|
py | b40bb8536d3427918cf83fc1535cbf5dd93384db | #IMPORTS
import requests
import colorama
from colorama import Fore, Back, Style
#Styling
print(f'''{Fore.GREEN}
__ _______ ____ ____ ___ _ _ _ _ _____ ____ _____ ___
\ \ / / ____| __ ) / ___/ _ \| \ | | \ | | ____/ ___|_ _|_ _|
\ \ /\ / /| _| | _ \ _____| | | | | | \| | \| | _|| | | | | |
\ V V / | |___| |_) |_____| |__| |_| | |\ | |\ | |__| |___ | | | |
\_/\_/ |_____|____/ \____\___/|_| \_|_| \_|_____\____| |_| |___|
__ _____ _______ __ ____ _ _ _____ ____ _ _______ ____
\ \ / /_ _|_ _\ \ / / / ___| | | | ____/ ___| |/ / ____| _ \
\ \ / / | | | | \ V /____| | | |_| | _|| | | ' /| _| | |_) |
\ V / | | | | | |_____| |___| _ | |__| |___| . \| |___| _ <
\_/ |___| |_| |_| \____|_| |_|_____\____|_|\_\_____|_| \_\\
''')
url = input("Enter The URL of SITE You Wanna Check for Connectivity : ")
timeout = 5
while True:
try:
request = requests.get(url, timeout=timeout)
print('Connected')
except KeyboardInterrupt:
print("KeyBoard Interrupted Exiting!")
exit()
except:
print(f"{Fore.RED}Not Connected")
exit()
|
py | b40bb894ab2321943278ab431ef28e6c093fce36 | import threading
import numpy as np
from queue import Queue
from microtbs_rl.utils.common_utils import *
logger = logging.getLogger(os.path.basename(__file__))
class _MultiEnvWorker:
"""
Helper class for the MultiEnv.
Currently implemented with threads, and it's slow because of GIL.
It would be much better to implement this with multiprocessing.
"""
def __init__(self, idx, make_env_func):
self.idx = idx
self.env = make_env_func()
self.env.seed(idx)
self.observation = self.env.reset()
self.action_queue = Queue()
self.result_queue = Queue()
self.thread = threading.Thread(target=self.start)
self.thread.start()
def start(self):
while True:
action = self.action_queue.get()
if action is None: # stop signal
logger.info('Stop worker %d...', self.idx)
break
observation, reward, done, _ = self.env.step(action)
if done:
observation = self.env.reset()
self.result_queue.put((observation, reward, done))
self.action_queue.task_done()
class MultiEnv:
"""Run multiple gym-compatible environments in parallel, keeping more or less the same interface."""
def __init__(self, num_envs, make_env_func):
self.num_envs = num_envs
self.workers = [_MultiEnvWorker(i, make_env_func) for i in range(num_envs)]
self.action_space = self.workers[0].env.action_space
self.observation_space = self.workers[0].env.observation_space
self.curr_episode_reward = [0] * num_envs
        self.episode_rewards = [[] for _ in range(num_envs)]  # independent list per env (avoid aliasing)
def initial_observations(self):
return [worker.observation for worker in self.workers]
def step(self, actions):
"""Obviously, returns vectors of obs, rewards, dones instead of usual single values."""
assert len(actions) == len(self.workers)
for worker, action in zip(self.workers, actions):
worker.action_queue.put(action)
results = []
for worker in self.workers:
worker.action_queue.join()
results.append(worker.result_queue.get())
observations, rewards, dones = zip(*results)
for i in range(self.num_envs):
self.curr_episode_reward[i] += rewards[i]
if dones[i]:
self.episode_rewards[i].append(self.curr_episode_reward[i])
self.curr_episode_reward[i] = 0
return observations, rewards, dones
def close(self):
logger.info('Stopping multi env...')
for worker in self.workers:
worker.action_queue.put(None) # terminate
worker.thread.join()
def calc_avg_rewards(self, n):
avg_reward = 0
for i in range(self.num_envs):
last_episodes_rewards = self.episode_rewards[i][-n:]
avg_reward += np.mean(last_episodes_rewards)
return avg_reward / float(self.num_envs)
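# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assumes the microtbs_rl package and an older Gym API (env.seed() plus a 4-tuple
# step() return) are installed, which is what _MultiEnvWorker expects; the
# 'CartPole-v1' factory below is only an example.
if __name__ == '__main__':
    import gym
    multi_env = MultiEnv(num_envs=2, make_env_func=lambda: gym.make('CartPole-v1'))
    observations = multi_env.initial_observations()
    for _ in range(10):
        actions = [multi_env.action_space.sample() for _ in range(multi_env.num_envs)]
        observations, rewards, dones = multi_env.step(actions)
    multi_env.close()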
|
py | b40bb996fa1fbebc5af2ceb0dd7017d56d2de5fc | """
Tools to open .py files as Unicode, using the encoding specified within the file,
as per PEP 263.
Much of the code is taken from the tokenize module in Python 3.2.
"""
from __future__ import absolute_import
import io
from io import TextIOWrapper, BytesIO
import re
import urllib
cookie_re = re.compile(ur"coding[:=]\s*([-\w.]+)", re.UNICODE)
cookie_comment_re = re.compile(ur"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE)
try:
# Available in Python 3
from tokenize import detect_encoding
except ImportError:
from codecs import lookup, BOM_UTF8
# Copied from Python 3.2 tokenize
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
# Copied from Python 3.2 tokenize
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
        be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
try:
# Available in Python 3.2 and above.
from tokenize import open
except ImportError:
# Copied from Python 3.2 tokenize
def open(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = io.open(filename, 'rb') # Tweaked to use io.open for Python 2
encoding, lines = detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
"""Converts a bytes string with python source code to unicode.
Unicode strings are passed through unchanged. Byte strings are checked
for the python source file encoding cookie to determine encoding.
txt can be either a bytes buffer or a string containing the source
code.
"""
if isinstance(txt, unicode):
return txt
if isinstance(txt, bytes):
buffer = BytesIO(txt)
else:
buffer = txt
try:
encoding, _ = detect_encoding(buffer.readline)
except SyntaxError:
encoding = "ascii"
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
text.mode = 'r'
if skip_encoding_cookie:
return u"".join(strip_encoding_cookie(text))
else:
return text.read()
def strip_encoding_cookie(filelike):
"""Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines.
"""
it = iter(filelike)
try:
first = next(it)
if not cookie_comment_re.match(first):
yield first
second = next(it)
if not cookie_comment_re.match(second):
yield second
except StopIteration:
return
for line in it:
yield line
def read_py_file(filename, skip_encoding_cookie=True):
"""Read a Python file, using the encoding declared inside the file.
Parameters
----------
filename : str
The path to the file to read.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file.
"""
with open(filename) as f: # the open function defined in this module.
if skip_encoding_cookie:
return "".join(strip_encoding_cookie(f))
else:
return f.read()
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
"""Read a Python file from a URL, using the encoding declared inside the file.
Parameters
----------
url : str
The URL from which to fetch the file.
errors : str
How to handle decoding errors in the file. Options are the same as for
bytes.decode(), but here 'replace' is the default.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file.
"""
response = urllib.urlopen(url)
buffer = io.BytesIO(response.read())
return source_to_unicode(buffer, errors, skip_encoding_cookie)
def _list_readline(x):
"""Given a list, returns a readline() function that returns the next element
with each call.
"""
x = iter(x)
def readline():
return next(x)
return readline
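# --- Illustrative self-check (added for clarity; not part of the original module) ---
# Like the rest of this file it targets Python 2; the sample source text is made up.
if __name__ == '__main__':
    _sample = b"# -*- coding: utf-8 -*-\nx = 1\n"
    # The encoding cookie is detected, then stripped from the decoded source.
    assert detect_encoding(_list_readline(_sample.splitlines(True)))[0] == 'utf-8'
    assert source_to_unicode(_sample) == u"x = 1\n"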
|
py | b40bb9e1ab6c9cddb96674827d50945558d3fd07 | #!/usr/bin/env python2.7
from sys import exit, stdout, argv
from os import environ, system
environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
import signal
from keras.layers import Input, Dense, Dropout, concatenate, LSTM, BatchNormalization, Conv1D, concatenate, CuDNNLSTM
from keras.models import Model
from keras.callbacks import ModelCheckpoint, LambdaCallback, TensorBoard
from keras.optimizers import Adam, SGD
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format('channels_last')
from subtlenet import config
from subtlenet.generators.gen import make_coll, generate, get_dims
import subtlenet.generators.gen as generator
from paths import basedir
'''
some global definitions
'''
NEPOCH = 50
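# CLI arguments (consumed just below): argv[1] sets generator.truncate and argv[2] sets
# config.limit; both values are baked into the APOSTLE tag used for output file names.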
generator.truncate = int(argv[1])
config.limit = int(argv[2])
APOSTLE = 'v4_trunc%i_limit%i'%(generator.truncate, config.limit)
system('cp %s particle_models/train_%s.py'%(argv[0], APOSTLE))
print 'training',APOSTLE
'''
instantiate data loaders
'''
top = make_coll(basedir + '/PARTITION/Top_*_CATEGORY.npy')
qcd = make_coll(basedir + '/PARTITION/QCD_*_CATEGORY.npy')
data = [top, qcd]
dims = get_dims(top)
'''
first build the classifier!
'''
# set up data
opts = {
'learn_mass' : True,
'learn_pt' : True,
}
classifier_train_gen = generate(data, partition='train', batch=500, **opts)
classifier_validation_gen = generate(data, partition='validate', batch=2000, **opts)
classifier_test_gen = generate(data, partition='test', batch=10, **opts)
test_i, test_o, test_w = next(classifier_test_gen)
# build all inputs
input_particles = Input(shape=(dims[1], dims[2]), name='input_particles')
input_mass = Input(shape=(1,), name='input_mass')
input_pt = Input(shape=(1,), name='input_pt')
inputs = [input_particles, input_mass, input_pt]
# now build the particle network
h = BatchNormalization(momentum=0.6, name='particles_input_bnorm')(input_particles)
h = Conv1D(32, 2, activation='relu', name='particles_conv0', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6, name='particles_conv0_bnorm')(h)
h = Conv1D(16, 4, activation='relu', name='particles_conv1', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6, name='particles_conv1_bnorm')(h)
h = CuDNNLSTM(100, name='particles_lstm')(h)
#h = Dropout(0.1)(h)
h = BatchNormalization(momentum=0.6, name='particles_lstm_norm')(h)
h = Dense(100, activation='relu',name='particles_lstm_dense',kernel_initializer='lecun_uniform')(h)
particles_final = BatchNormalization(momentum=0.6,name='particles_lstm_dense_norm')(h)
# merge everything
to_merge = [particles_final, input_mass, input_pt]
h = concatenate(to_merge)
for i in xrange(1,5):
h = Dense(50, activation='relu',name='final_dense%i'%i)(h)
# if i%2:
# h = Dropout(0.1)(h)
h = BatchNormalization(momentum=0.6,name='final_dense%i_norm'%i)(h)
y_hat = Dense(config.n_truth, activation='softmax')(h)
classifier = Model(inputs=inputs, outputs=[y_hat])
classifier.compile(optimizer=Adam(lr=0.0003, amsgrad=True),
loss='categorical_crossentropy',
metrics=['accuracy'])
print '########### CLASSIFIER ############'
classifier.summary()
print '###################################'
# ctrl+C now triggers a graceful exit
def save_classifier(name='classifier', model=classifier):
model.save('particle_models/%s_%s.h5'%(name, APOSTLE))
def save_and_exit(signal=None, frame=None, name='classifier', model=classifier):
save_classifier(name, model)
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
classifier.fit_generator(classifier_train_gen,
steps_per_epoch=3000,
epochs=NEPOCH,
validation_data=classifier_validation_gen,
validation_steps=2000,
callbacks = [ModelCheckpoint('particle_models/%s_%s_best.h5'%('classifier',APOSTLE), save_best_only=True, verbose=True)],
)
save_classifier()
|
py | b40bb9e9803af06aefc9b60f3724be1dcf327405 | """Slot load/parsing utility methods."""
import logging
import subprocess
import typing
from dataclasses import dataclass
from pathlib import Path
from .const import IntentsType, ReplacementsType
from .jsgf import Expression, Rule, Sentence, Sequence, SlotReference, walk_expression
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
@dataclass
class StaticSlotInfo:
"""Name/path to a static slot text file."""
name: str
path: Path
@dataclass
class SlotProgramInfo:
"""Name/path/arguments for a slot program."""
key: str
name: str
path: Path
args: typing.Optional[typing.List[str]] = None
# -----------------------------------------------------------------------------
def get_slot_replacements(
sentences: IntentsType,
slots_dirs: typing.Optional[typing.List[Path]] = None,
slot_programs_dirs: typing.Optional[typing.List[Path]] = None,
slot_visitor: typing.Optional[
typing.Callable[[Expression], typing.Union[bool, Expression]]
] = None,
) -> ReplacementsType:
"""Create replacement dictionary for referenced slots."""
replacements: ReplacementsType = {}
slots_dirs = slots_dirs or []
slot_programs_dirs = slot_programs_dirs or []
# Gather used slot names
slot_names: typing.Set[str] = set()
for intent_name in sentences:
for item in sentences[intent_name]:
for slot_name in get_slot_names(item):
slot_names.add(slot_name)
# Load slot values
for slot_key in slot_names:
if slot_key in replacements:
# Skip already loaded slot
continue
# Find slot file/program in file system
slot_info = find_slot(slot_key, slots_dirs, slot_programs_dirs)
slot_values: typing.List[Expression] = []
if isinstance(slot_info, StaticSlotInfo):
# Parse each non-empty line as a JSGF sentence
_LOGGER.debug("Loading slot %s from %s", slot_key, str(slot_info.path))
with open(slot_info.path, "r") as slot_file:
for line in slot_file:
line = line.strip()
if line:
sentence = Sentence.parse(line)
if slot_visitor:
walk_expression(sentence, slot_visitor)
slot_values.append(sentence)
elif isinstance(slot_info, SlotProgramInfo):
# Generate values in place
slot_command = [str(slot_info.path)] + (slot_info.args or [])
_LOGGER.debug("Running program for slot %s: %s", slot_key, slot_command)
# Parse each non-empty line as a JSGF sentence
has_output = False
for line in subprocess.check_output(
slot_command, universal_newlines=True
).splitlines():
line = line.strip()
if line:
has_output = True
sentence = Sentence.parse(line)
if slot_visitor:
walk_expression(sentence, slot_visitor)
slot_values.append(sentence)
assert has_output, f"No output from {slot_command}"
else:
_LOGGER.warning(
"Failed to load file/program for slot %s (tried: %s, %s)",
slot_key,
slots_dirs,
slot_programs_dirs,
)
# Replace $slot with sentences
replacements[f"${slot_key}"] = slot_values
return replacements
# -----------------------------------------------------------------------------
def get_slot_names(item: typing.Union[Expression, Rule]) -> typing.Iterable[str]:
"""Yield referenced slot names from an expression."""
if isinstance(item, SlotReference):
yield item.slot_name
elif isinstance(item, Sequence):
for sub_item in item.items:
for slot_name in get_slot_names(sub_item):
yield slot_name
elif isinstance(item, Rule):
for slot_name in get_slot_names(item.rule_body):
yield slot_name
def split_slot_args(
slot_name: str,
) -> typing.Tuple[str, typing.Optional[typing.List[str]]]:
"""Split slot name and arguments out (slot,arg1,arg2,...)"""
# Check for arguments.
slot_args: typing.Optional[typing.List[str]] = None
# Slot name retains argument(s).
if "," in slot_name:
slot_name, *slot_args = slot_name.split(",")
return slot_name, slot_args
# -----------------------------------------------------------------------------
def find_slot(
slot_key: str, slots_dirs: typing.List[Path], slot_programs_dirs: typing.List[Path]
) -> typing.Optional[typing.Union[StaticSlotInfo, SlotProgramInfo]]:
"""Look up a static slot or slot program."""
# Try static user slots
for slots_dir in slots_dirs:
slot_path = slots_dir / slot_key
if slot_path.is_file():
return StaticSlotInfo(name=slot_key, path=slot_path)
# Try user slot programs
slot_name, slot_args = split_slot_args(slot_key)
for slot_programs_dir in slot_programs_dirs:
slot_path = slot_programs_dir / slot_name
if slot_path.is_file():
return SlotProgramInfo(
key=slot_key, name=slot_name, path=slot_path, args=slot_args
)
return None
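# --- Illustrative examples (added for clarity; not part of the original module) ---
# split_slot_args() separates a "slot,arg1,arg2" key into a name and argument list:
#   split_slot_args("number,0,100")  -> ("number", ["0", "100"])
#   split_slot_args("color")         -> ("color", None)
# find_slot() above returns None when the slot key is found in none of the supplied
# directories, in which case get_slot_replacements() logs a warning and maps the
# reference to an empty list.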
|
py | b40bbb0c7e917fa13ea91d7da0659e9824cfbdf0 | # -*- coding: UTF-8 -*-
from datetime import *
import os
import os.path
import re
date_time = date.today()
webtitle = input('Input Your Blog Title On Website: ')
strText = '''---
layout: post
title: "笔记"
subtitle: ""
date: %s
author: "brucechen"
header-img: "img/post-bg-java.jpg"
published: false
tags:
- Java
- 读书笔记
---''' % date_time.strftime('%Y-%m-%d')
print(date_time.strftime('%Y-%m-%d'))
f = open('%s-%s.markdown'% (date_time.strftime('%Y-%m-%d'), webtitle), mode='w', encoding='UTF-8')
f.write(strText)
f.close()
print(strText) |
py | b40bbba5e9dcfab7212216a62bbe022b5894bb3c | import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init
from ..builder import build_loss
from ..registry import NECKS
class Identity(nn.Module):
"""Identity mapping."""
def forward(self, x):
return x
class DownSample(nn.Module):
"""DownSample modules.
It uses convolution and maxpooling to downsample the input feature,
and specifies downsample position to determine `pool-conv` or `conv-pool`.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output feature.
kernel_size (int | tuple[int]): Same as :class:`ConvModule`.
Default: (3, 1, 1).
stride (int | tuple[int]): Same as :class:`ConvModule`.
Default: (1, 1, 1).
padding (int | tuple[int]): Same as :class:`ConvModule`.
Default: (1, 0, 0).
groups (int): Same as :class:`ConvModule`. Default: 1.
bias (bool | str): Same as :class:`ConvModule`. Default: False.
conv_cfg (dict | None): Same as :class:`ConvModule`.
Default: dict(type='Conv3d').
norm_cfg (dict | None): Same as :class:`ConvModule`. Default: None.
act_cfg (dict | None): Same as :class:`ConvModule`. Default: None.
downsample_position (str): Type of downsample position. Options are
'before' and 'after'. Default: 'after'.
downsample_scale (int | tuple[int]): downsample scale for maxpooling.
It will be used for kernel size and stride of maxpooling.
Default: (1, 2, 2).
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=(3, 1, 1),
stride=(1, 1, 1),
padding=(1, 0, 0),
groups=1,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=None,
act_cfg=None,
downsample_position='after',
downsample_scale=(1, 2, 2)):
super().__init__()
self.conv = ConvModule(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=groups,
bias=bias,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
assert downsample_position in ['before', 'after']
self.downsample_position = downsample_position
self.pool = nn.MaxPool3d(
downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True)
def forward(self, x):
if self.downsample_position == 'before':
x = self.pool(x)
x = self.conv(x)
else:
x = self.conv(x)
x = self.pool(x)
return x
class LevelFusion(nn.Module):
"""Level Fusion module.
This module is used to aggregate the hierarchical features dynamic in
visual tempos and consistent in spatial semantics. The top/bottom features
for top-down/bottom-up flow would be combined to achieve two additional
options, namely 'Cascade Flow' or 'Parallel Flow'. While applying a
bottom-up flow after a top-down flow will lead to the cascade flow,
applying them simultaneously will result in the parallel flow.
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
mid_channels (tuple[int]): Channel numbers of middle features tuple.
out_channels (int): Channel numbers of output features.
downsample_scales (tuple[int | tuple[int]]): downsample scales for
each :class:`DownSample` module. Default: ((1, 1, 1), (1, 1, 1)).
"""
def __init__(self,
in_channels,
mid_channels,
out_channels,
downsample_scales=((1, 1, 1), (1, 1, 1))):
super().__init__()
num_stages = len(in_channels)
self.downsamples = nn.ModuleList()
for i in range(num_stages):
downsample = DownSample(
in_channels[i],
mid_channels[i],
kernel_size=(1, 1, 1),
stride=(1, 1, 1),
bias=False,
padding=(0, 0, 0),
groups=32,
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
downsample_position='before',
downsample_scale=downsample_scales[i])
self.downsamples.append(downsample)
self.fusion_conv = ConvModule(
sum(mid_channels),
out_channels,
1,
stride=1,
padding=0,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True))
def forward(self, x):
out = [self.downsamples[i](feature) for i, feature in enumerate(x)]
out = torch.cat(out, 1)
out = self.fusion_conv(out)
return out
class SpatialModulation(nn.Module):
"""Spatial Semantic Modulation.
This module is used to align spatial semantics of features in the
multi-depth pyramid. For each but the top-level feature, a stack
of convolutions with level-specific stride are applied to it, matching
its spatial shape and receptive field with the top one.
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
out_channels (int): Channel numbers of output features tuple.
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.spatial_modulation = nn.ModuleList()
for channel in in_channels:
downsample_scale = out_channels // channel
downsample_factor = int(np.log2(downsample_scale))
op = nn.ModuleList()
if downsample_factor < 1:
op = Identity()
else:
for factor in range(downsample_factor):
in_factor = 2**factor
out_factor = 2**(factor + 1)
op.append(
ConvModule(
channel * in_factor,
channel * out_factor, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True)))
self.spatial_modulation.append(op)
def forward(self, x):
out = []
for i, feature in enumerate(x):
if isinstance(self.spatial_modulation[i], nn.ModuleList):
out_ = x[i]
for op in self.spatial_modulation[i]:
out_ = op(out_)
out.append(out_)
else:
out.append(self.spatial_modulation[i](x[i]))
return out
class AuxHead(nn.Module):
"""Auxiliary Head.
This auxiliary head is appended to receive stronger supervision,
leading to enhanced semantics.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output features.
loss_weight (float): weight of loss for the auxiliary head.
Default: 0.5.
loss_cls (dict): loss_cls (dict): Config for building loss.
Default: ``dict(type='CrossEntropyLoss')``.
"""
def __init__(self,
in_channels,
out_channels,
loss_weight=0.5,
loss_cls=dict(type='CrossEntropyLoss')):
super().__init__()
self.conv = ConvModule(
in_channels,
in_channels * 2, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.loss_weight = loss_weight
self.dropout = nn.Dropout(p=0.5)
self.fc = nn.Linear(in_channels * 2, out_channels)
self.loss_cls = build_loss(loss_cls)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
normal_init(m, std=0.01)
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
def forward(self, x, target=None):
losses = dict()
if target is None:
return losses
x = self.conv(x)
x = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1)
x = self.dropout(x)
x = self.fc(x)
if target.shape == torch.Size([]):
target = target.unsqueeze(0)
losses['loss_aux'] = self.loss_weight * self.loss_cls(x, target)
return losses
class TemporalModulation(nn.Module):
"""Temporal Rate Modulation.
The module is used to equip TPN with a similar flexibility for temporal
tempo modulation as in the input-level frame pyramid.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output features.
downsample_scale (int): Downsample scale for maxpooling. Default: 8.
"""
def __init__(self, in_channels, out_channels, downsample_scale=8):
super().__init__()
self.conv = ConvModule(
in_channels,
out_channels, (3, 1, 1),
stride=(1, 1, 1),
padding=(1, 0, 0),
bias=False,
groups=32,
conv_cfg=dict(type='Conv3d'),
act_cfg=None)
self.pool = nn.MaxPool3d((downsample_scale, 1, 1),
(downsample_scale, 1, 1), (0, 0, 0),
ceil_mode=True)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
@NECKS.register_module()
class TPN(nn.Module):
"""TPN neck.
This module is proposed in `Temporal Pyramid Network for Action Recognition
<https://arxiv.org/pdf/2004.03548.pdf>`_
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
out_channels (int): Channel number of output feature.
spatial_modulation_cfg (dict | None): Config for spatial modulation
layers. Required keys are `in_channels` and `out_channels`.
Default: None.
temporal_modulation_cfg (dict | None): Config for temporal modulation
layers. Default: None.
upsample_cfg (dict | None): Config for upsample layers. The keys are
same as that in :class:``nn.Upsample``. Default: None.
downsample_cfg (dict | None): Config for downsample layers.
Default: None.
level_fusion_cfg (dict | None): Config for level fusion layers.
Required keys are 'in_channels', 'mid_channels', 'out_channels'.
Default: None.
aux_head_cfg (dict | None): Config for aux head layers.
Required keys are 'out_channels'. Default: None.
flow_type (str): Flow type to combine the features. Options are
'cascade' and 'parallel'. Default: 'cascade'.
"""
def __init__(self,
in_channels,
out_channels,
spatial_modulation_cfg=None,
temporal_modulation_cfg=None,
upsample_cfg=None,
downsample_cfg=None,
level_fusion_cfg=None,
aux_head_cfg=None,
flow_type='cascade'):
super().__init__()
assert isinstance(in_channels, tuple)
assert isinstance(out_channels, int)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_tpn_stages = len(in_channels)
assert spatial_modulation_cfg is None or isinstance(
spatial_modulation_cfg, dict)
assert temporal_modulation_cfg is None or isinstance(
temporal_modulation_cfg, dict)
assert upsample_cfg is None or isinstance(upsample_cfg, dict)
assert downsample_cfg is None or isinstance(downsample_cfg, dict)
assert aux_head_cfg is None or isinstance(aux_head_cfg, dict)
assert level_fusion_cfg is None or isinstance(level_fusion_cfg, dict)
if flow_type not in ['cascade', 'parallel']:
raise ValueError(
f"flow type in TPN should be 'cascade' or 'parallel', "
f'but got {flow_type} instead.')
self.flow_type = flow_type
self.temporal_modulation_ops = nn.ModuleList()
self.upsample_ops = nn.ModuleList()
self.downsample_ops = nn.ModuleList()
self.level_fusion_1 = LevelFusion(**level_fusion_cfg)
self.spatial_modulation = SpatialModulation(**spatial_modulation_cfg)
for i in range(self.num_tpn_stages):
if temporal_modulation_cfg is not None:
downsample_scale = temporal_modulation_cfg[
'downsample_scales'][i]
temporal_modulation = TemporalModulation(
in_channels[-1], out_channels, downsample_scale)
self.temporal_modulation_ops.append(temporal_modulation)
if i < self.num_tpn_stages - 1:
if upsample_cfg is not None:
upsample = nn.Upsample(**upsample_cfg)
self.upsample_ops.append(upsample)
if downsample_cfg is not None:
downsample = DownSample(out_channels, out_channels,
**downsample_cfg)
self.downsample_ops.append(downsample)
out_dims = level_fusion_cfg['out_channels']
# two pyramids
self.level_fusion_2 = LevelFusion(**level_fusion_cfg)
self.pyramid_fusion = ConvModule(
out_dims * 2,
2048,
1,
stride=1,
padding=0,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
if aux_head_cfg is not None:
self.aux_head = AuxHead(self.in_channels[-2], **aux_head_cfg)
else:
self.aux_head = None
self.init_weights()
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
if self.aux_head is not None:
self.aux_head.init_weights()
def forward(self, x, target=None):
loss_aux = dict()
# Auxiliary loss
if self.aux_head is not None:
loss_aux = self.aux_head(x[-2], target)
# Spatial Modulation
spatial_modulation_outs = self.spatial_modulation(x)
# Temporal Modulation
temporal_modulation_outs = []
for i, temporal_modulation in enumerate(self.temporal_modulation_ops):
temporal_modulation_outs.append(
temporal_modulation(spatial_modulation_outs[i]))
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.upsample_ops) != 0:
for i in range(self.num_tpn_stages - 1, 0, -1):
outs[i - 1] = outs[i - 1] + self.upsample_ops[i - 1](outs[i])
# Get top-down outs
top_down_outs = self.level_fusion_1(outs)
# Build bottom-up flow using downsample operation
if self.flow_type == 'cascade':
outs = outs
else:
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.downsample_ops) != 0:
for i in range(self.num_tpn_stages - 1):
outs[i + 1] = outs[i + 1] + self.downsample_ops[i](outs[i])
# Get bottom-up outs
        bottom_up_outs = self.level_fusion_2(outs)
        # fuse two pyramid outs
        outs = self.pyramid_fusion(
            torch.cat([top_down_outs, bottom_up_outs], 1))
return outs, loss_aux
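# --- Illustrative config sketch (added for clarity; not part of the original module) ---
# A TPN neck of this shape is typically configured roughly as follows; the values are
# borrowed from a common TPN-on-SlowOnly setup and should be treated as an assumption,
# not a specification defined in this file:
#   neck=dict(
#       type='TPN',
#       in_channels=(1024, 2048),
#       out_channels=1024,
#       spatial_modulation_cfg=dict(in_channels=(1024, 2048), out_channels=2048),
#       temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
#       upsample_cfg=dict(scale_factor=(1, 1, 1)),
#       downsample_cfg=dict(downsample_scale=(1, 1, 1)),
#       level_fusion_cfg=dict(
#           in_channels=(1024, 1024), mid_channels=(1024, 1024), out_channels=2048,
#           downsample_scales=((1, 1, 1), (1, 1, 1))),
#       aux_head_cfg=dict(out_channels=400, loss_weight=0.5))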
|
py | b40bbbf2697ca4a19dafddd42ee6c4db2d6ca520 | """PyTorch SelecSLS Net example for ImageNet Classification
License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode)
Author: Dushyant Mehta (@mehtadushy)
SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D
Human Pose Estimation with a Single RGB Camera, Mehta et al."
https://arxiv.org/abs/1907.00837
Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models
and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch
"""
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import create_classifier
from .registry import register_model
__all__ = ['SelecSLS'] # model_registry will add each entrypoint fn to this
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = {
'selecsls42': _cfg(
url='',
interpolation='bicubic'),
'selecsls42b': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth',
interpolation='bicubic'),
'selecsls60': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth',
interpolation='bicubic'),
'selecsls60b': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth',
interpolation='bicubic'),
'selecsls84': _cfg(
url='',
interpolation='bicubic'),
}
class SequentialList(nn.Sequential):
def __init__(self, *args):
super(SequentialList, self).__init__(*args)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (List[torch.Tensor])
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (List[torch.Tensor])
pass
def forward(self, x) -> List[torch.Tensor]:
for module in self:
x = module(x)
return x
class SelectSeq(nn.Module):
def __init__(self, mode='index', index=0):
super(SelectSeq, self).__init__()
self.mode = mode
self.index = index
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (torch.Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (Tuple[torch.Tensor]) -> (torch.Tensor)
pass
def forward(self, x) -> torch.Tensor:
if self.mode == 'index':
return x[self.index]
else:
return torch.cat(x, dim=1)
def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1):
if padding is None:
padding = ((stride - 1) + dilation * (k - 1)) // 2
return nn.Sequential(
nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_chs),
nn.ReLU(inplace=True)
)
class SelecSLSBlock(nn.Module):
def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1):
super(SelecSLSBlock, self).__init__()
self.stride = stride
self.is_first = is_first
assert stride in [1, 2]
# Process input with 4 conv blocks with the same number of input and output channels
self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation)
self.conv2 = conv_bn(mid_chs, mid_chs, 1)
self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3)
self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1)
self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3)
self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1)
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
if not isinstance(x, list):
x = [x]
assert len(x) in [1, 2]
d1 = self.conv1(x[0])
d2 = self.conv3(self.conv2(d1))
d3 = self.conv5(self.conv4(d2))
if self.is_first:
out = self.conv6(torch.cat([d1, d2, d3], 1))
return [out, out]
else:
return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]]
class SelecSLS(nn.Module):
"""SelecSLS42 / SelecSLS60 / SelecSLS84
Parameters
----------
cfg : network config dictionary specifying block type, feature, and head args
num_classes : int, default 1000
Number of classification classes.
in_chans : int, default 3
Number of input (color) channels.
drop_rate : float, default 0.
Dropout probability before classifier, for training
global_pool : str, default 'avg'
Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax'
"""
def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'):
self.num_classes = num_classes
self.drop_rate = drop_rate
super(SelecSLS, self).__init__()
self.stem = conv_bn(in_chans, 32, stride=2)
self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']])
self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way
self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']])
self.num_features = cfg['num_features']
self.feature_info = cfg['feature_info']
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.)
nn.init.constant_(m.bias, 0.)
def get_classifier(self):
return self.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.features(x)
x = self.head(self.from_seq(x))
return x
def forward(self, x):
x = self.forward_features(x)
x = self.global_pool(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = self.fc(x)
return x
def _create_selecsls(variant, pretrained, **kwargs):
cfg = {}
feature_info = [dict(num_chs=32, reduction=2, module='stem.2')]
if variant.startswith('selecsls42'):
cfg['block'] = SelecSLSBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 128, False, 1),
(128, 0, 144, 144, True, 2),
(144, 144, 144, 288, False, 1),
(288, 0, 304, 304, True, 2),
(304, 304, 304, 480, False, 1),
]
feature_info.extend([
dict(num_chs=128, reduction=4, module='features.1'),
dict(num_chs=288, reduction=8, module='features.3'),
dict(num_chs=480, reduction=16, module='features.5'),
])
# Head can be replaced with alternative configurations depending on the problem
feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
if variant == 'selecsls42b':
cfg['head'] = [
(480, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1280, 3, 2),
(1280, 1024, 1, 1),
]
feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
cfg['num_features'] = 1024
else:
cfg['head'] = [
(480, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 1, 1),
]
feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
cfg['num_features'] = 1280
elif variant.startswith('selecsls60'):
cfg['block'] = SelecSLSBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 128, False, 1),
(128, 0, 128, 128, True, 2),
(128, 128, 128, 128, False, 1),
(128, 128, 128, 288, False, 1),
(288, 0, 288, 288, True, 2),
(288, 288, 288, 288, False, 1),
(288, 288, 288, 288, False, 1),
(288, 288, 288, 416, False, 1),
]
feature_info.extend([
dict(num_chs=128, reduction=4, module='features.1'),
dict(num_chs=288, reduction=8, module='features.4'),
dict(num_chs=416, reduction=16, module='features.8'),
])
# Head can be replaced with alternative configurations depending on the problem
feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
if variant == 'selecsls60b':
cfg['head'] = [
(416, 756, 3, 2),
(756, 1024, 3, 1),
(1024, 1280, 3, 2),
(1280, 1024, 1, 1),
]
feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
cfg['num_features'] = 1024
else:
cfg['head'] = [
(416, 756, 3, 2),
(756, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 1, 1),
]
feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
cfg['num_features'] = 1280
elif variant == 'selecsls84':
cfg['block'] = SelecSLSBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 144, False, 1),
(144, 0, 144, 144, True, 2),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 304, False, 1),
(304, 0, 304, 304, True, 2),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 512, False, 1),
]
feature_info.extend([
dict(num_chs=144, reduction=4, module='features.1'),
dict(num_chs=304, reduction=8, module='features.6'),
dict(num_chs=512, reduction=16, module='features.12'),
])
# Head can be replaced with alternative configurations depending on the problem
cfg['head'] = [
(512, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 3, 1),
]
cfg['num_features'] = 1280
feature_info.extend([
dict(num_chs=1024, reduction=32, module='head.1'),
dict(num_chs=1280, reduction=64, module='head.3')
])
else:
raise ValueError('Invalid net configuration ' + variant + ' !!!')
cfg['feature_info'] = feature_info
# this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises?
return build_model_with_cfg(
SelecSLS, variant, pretrained,
default_cfg=default_cfgs[variant],
model_cfg=cfg,
feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True),
**kwargs)
@register_model
def selecsls42(pretrained=False, **kwargs):
"""Constructs a SelecSLS42 model.
"""
return _create_selecsls('selecsls42', pretrained, **kwargs)
@register_model
def selecsls42b(pretrained=False, **kwargs):
"""Constructs a SelecSLS42_B model.
"""
return _create_selecsls('selecsls42b', pretrained, **kwargs)
@register_model
def selecsls60(pretrained=False, **kwargs):
"""Constructs a SelecSLS60 model.
"""
return _create_selecsls('selecsls60', pretrained, **kwargs)
@register_model
def selecsls60b(pretrained=False, **kwargs):
"""Constructs a SelecSLS60_B model.
"""
return _create_selecsls('selecsls60b', pretrained, **kwargs)
@register_model
def selecsls84(pretrained=False, **kwargs):
"""Constructs a SelecSLS84 model.
"""
return _create_selecsls('selecsls84', pretrained, **kwargs)
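# --- Usage sketch (illustrative; not part of the original file) ---
# A minimal way to instantiate one of the registered variants directly, assuming
# torch is available and the module's own dependencies are importable. The
# 224x224 input size is an assumption based on common ImageNet default configs.
if __name__ == '__main__':
    import torch
    model = selecsls42b(pretrained=False)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # typically (1, num_classes)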
|
py | b40bbd6c41df0df2899368b3f59d8c19ee019739 | from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:2222")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
|
py | b40bbe2624b71fa598e02e47b50ae20c7e8d84c0 | #! /usr/bin/python
#filename build_server.py
import os
import sys
import zipfile
z = zipfile.ZipFile('server.zip', 'w')
count = 0
# Open the file with filelist.
with open('server.manifest') as f:
files = f.read().splitlines()
# Iterate over the lines, each line represents a file name.
for file in files:
count = count + 1
z.write(os.path.normpath(file), os.path.normpath(file), compress_type = zipfile.ZIP_DEFLATED)
z.close()
print('{} files compressed to server.zip'.format(count)) |
py | b40bbe5d528623ea4f342494c1a7c10b4879eef7 | # coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v1.model.synthetics_api_test_result_data import SyntheticsAPITestResultData
from datadog_api_client.v1.model.synthetics_api_test_result_full_check import SyntheticsAPITestResultFullCheck
from datadog_api_client.v1.model.synthetics_test_monitor_status import SyntheticsTestMonitorStatus
globals()['SyntheticsAPITestResultData'] = SyntheticsAPITestResultData
globals()['SyntheticsAPITestResultFullCheck'] = SyntheticsAPITestResultFullCheck
globals()['SyntheticsTestMonitorStatus'] = SyntheticsTestMonitorStatus
class SyntheticsAPITestResultFull(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'check': (SyntheticsAPITestResultFullCheck,), # noqa: E501
'check_time': (float,), # noqa: E501
'check_version': (int,), # noqa: E501
'probe_dc': (str,), # noqa: E501
'result': (SyntheticsAPITestResultData,), # noqa: E501
'result_id': (str,), # noqa: E501
'status': (SyntheticsTestMonitorStatus,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'check': 'check', # noqa: E501
'check_time': 'check_time', # noqa: E501
'check_version': 'check_version', # noqa: E501
'probe_dc': 'probe_dc', # noqa: E501
'result': 'result', # noqa: E501
'result_id': 'result_id', # noqa: E501
'status': 'status', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SyntheticsAPITestResultFull - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
check (SyntheticsAPITestResultFullCheck): [optional] # noqa: E501
check_time (float): When the API test was conducted. [optional] # noqa: E501
check_version (int): Version of the API test used. [optional] # noqa: E501
probe_dc (str): Locations for which to query the API test results. [optional] # noqa: E501
result (SyntheticsAPITestResultData): [optional] # noqa: E501
result_id (str): ID of the API test result. [optional] # noqa: E501
status (SyntheticsTestMonitorStatus): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
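# --- Usage sketch (illustrative; not part of the generated module) ---
# A minimal, hypothetical construction from plain keyword arguments; all field
# values below are made up and nested model fields are simply omitted.
if __name__ == "__main__":
    example = SyntheticsAPITestResultFull(
        check_time=1612345678.0,
        check_version=2,
        probe_dc="aws:us-east-1",
        result_id="abc-123",
    )
    print(example.result_id)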
|
py | b40bbe92f021772f003ebd6ac14d1a0642b80d97 | from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import generic
from .models import Artist, Author, Book, BookSigning, Page
from .test_forms import AuthorForm, ContactForm
class CustomTemplateView(generic.TemplateView):
template_name = 'generic_views/about.html'
def get_context_data(self, **kwargs):
context = super(CustomTemplateView, self).get_context_data(**kwargs)
context.update({'key': 'value'})
return context
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
"""A ListView that doesn't use a model."""
queryset = [
{'first': 'John', 'last': 'Lennon'},
{'first': 'Yoko', 'last': 'Ono'}
]
template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
template_name = 'generic_views/list.html'
queryset = Artist.objects.all()
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class BookList(generic.ListView):
model = Book
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
paginate_by = 5
def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
return super(AuthorListCustomPaginator, self).get_paginator(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class ContactView(generic.FormView):
form_class = ContactForm
success_url = reverse_lazy('authors_list')
template_name = 'generic_views/form.html'
class ArtistCreate(generic.CreateView):
model = Artist
fields = '__all__'
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
fields = '__all__'
class TemplateResponseWithoutTemplate(generic.detail.SingleObjectTemplateResponseMixin, generic.View):
# we don't define the usual template_name here
def __init__(self):
# Dummy object, but attr is required by get_template_name()
self.object = None
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
fields = '__all__'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
fields = '__all__'
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
fields = '__all__'
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
fields = '__all__'
class OneAuthorUpdate(generic.UpdateView):
success_url = '/list/authors/'
fields = '__all__'
def get_object(self):
return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
success_url = reverse_lazy('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
fields = '__all__'
def get_queryset(self):
return Author.objects.all()
class BookDetailGetObjectCustomQueryset(BookDetail):
def get_object(self, queryset=None):
return super(BookDetailGetObjectCustomQueryset, self).get_object(
queryset=Book.objects.filter(pk=self.kwargs['pk']))
class CustomMultipleObjectMixinView(generic.list.MultipleObjectMixin, generic.View):
queryset = [
{'name': 'John'},
{'name': 'Yoko'},
]
def get(self, request):
self.object_list = self.get_queryset()
class CustomContextView(generic.detail.SingleObjectMixin, generic.View):
model = Book
object = Book(name='dummy')
def get_object(self):
return Book(name="dummy")
def get_context_data(self, **kwargs):
context = {'custom_key': 'custom_value'}
context.update(kwargs)
return super(CustomContextView, self).get_context_data(**context)
def get_context_object_name(self, obj):
return "test_name"
class CustomSingleObjectView(generic.detail.SingleObjectMixin, generic.View):
model = Book
object = Book(name="dummy")
class BookSigningConfig(object):
model = BookSigning
date_field = 'event_date'
# use the same templates as for books
def get_template_names(self):
return ['generic_views/book%s.html' % self.template_name_suffix]
class BookSigningArchive(BookSigningConfig, generic.ArchiveIndexView):
pass
class BookSigningYearArchive(BookSigningConfig, generic.YearArchiveView):
pass
class BookSigningMonthArchive(BookSigningConfig, generic.MonthArchiveView):
pass
class BookSigningWeekArchive(BookSigningConfig, generic.WeekArchiveView):
pass
class BookSigningDayArchive(BookSigningConfig, generic.DayArchiveView):
pass
class BookSigningTodayArchive(BookSigningConfig, generic.TodayArchiveView):
pass
class BookSigningDetail(BookSigningConfig, generic.DateDetailView):
context_object_name = 'book'
class NonModel(object):
id = "non_model_1"
_meta = None
class NonModelDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
model = NonModel
def get_object(self, queryset=None):
return NonModel()
class ObjectDoesNotExistDetail(generic.DetailView):
def get_queryset(self):
return Book.does_not_exist.all()
|
py | b40bbf1ffb3e877d77177545b107e0e36c6d2e0e | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Finetuning Callback
^^^^^^^^^^^^^^^^^^^^
Freeze and unfreeze models for finetuning purposes
"""
import logging
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Union
import torch
from torch.nn import Module
from torch.nn.modules.batchnorm import _BatchNorm
from torch.optim.optimizer import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
log = logging.getLogger(__name__)
def multiplicative(epoch):
return 2
class BaseFinetuning(Callback):
r"""
This class implements the base logic for writing your own Finetuning Callback.
Override ``freeze_before_training`` and ``finetune_function`` methods with your own logic.
``freeze_before_training``: This method is called before ``configure_optimizers``
and should be used to freeze any modules parameters.
``finetune_function``: This method is called on every train epoch start and should be used to
``unfreeze`` any parameters. Those parameters needs to be added in a new ``param_group``
within the optimizer.
.. note:: Make sure to filter the parameters based on ``requires_grad``.
Example::
class MyModel(LightningModule):
...
def configure_optimizers(self):
# Make sure to filter the parameters based on `requires_grad`
return Adam(filter(lambda p: p.requires_grad, self.parameters()))
class FeatureExtractorFreezeUnfreeze(BaseFinetuning):
def __init__(self, unfreeze_at_epoch=10):
self._unfreeze_at_epoch = unfreeze_at_epoch
def freeze_before_training(self, pl_module):
# freeze any module you want
# Here, we are freezing ``feature_extractor``
self.freeze(pl_module.feature_extractor)
def finetune_function(self, pl_module, current_epoch, optimizer, optimizer_idx):
# When `current_epoch` is 10, feature_extractor will start training.
if current_epoch == self._unfreeze_at_epoch:
self.unfreeze_and_add_param_group(
modules=pl_module.feature_extractor,
optimizer=optimizer,
train_bn=True,
)
"""
def __init__(self):
self._internal_state: Dict[int, List[Dict[str, Any]]] = {}
def on_save_checkpoint(
self,
trainer: 'pl.Trainer',
pl_module: 'pl.LightningModule',
checkpoint: Dict[str, Any],
) -> Dict[int, List[Dict[str, Any]]]:
return self._internal_state
def on_load_checkpoint(
self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', callback_state: Dict[int, List[Dict[str, Any]]]
) -> None:
self._internal_state = callback_state
# restore the param_groups created during the previous training.
named_parameters = dict(pl_module.named_parameters())
for opt_idx, optimizer in enumerate(trainer.optimizers):
param_groups = self.__apply_mapping_to_param_groups(self._internal_state[opt_idx], named_parameters)
optimizer.param_groups = param_groups
@staticmethod
def flatten_modules(modules: Union[Module, Iterable[Union[Module, Iterable]]]) -> List[Module]:
"""
This function is used to flatten a module or an iterable of modules into a list of its leaf modules (modules
with no children) and parent modules that have parameters directly themselves.
Args:
modules: A given module or an iterable of modules
Returns:
List of modules
"""
if isinstance(modules, Iterable):
_modules = []
for m in modules:
_modules.extend(BaseFinetuning.flatten_modules(m))
else:
_modules = modules.modules()
# Capture all leaf modules as well as parent modules that have parameters directly themselves
return [m for m in _modules if not list(m.children()) or m._parameters]
@staticmethod
def filter_params(
modules: Union[Module, Iterable[Union[Module, Iterable]]],
train_bn: bool = True,
requires_grad: bool = True
) -> Generator:
"""Yields the `requires_grad` parameters of a given module or list of modules.
Args:
modules: A given module or an iterable of modules
train_bn: Whether to train BatchNorm module
requires_grad: Whether to create a generator for trainable or non-trainable parameters.
Returns:
Generator
"""
modules = BaseFinetuning.flatten_modules(modules)
for mod in modules:
if isinstance(mod, _BatchNorm) and not train_bn:
continue
# recursion could yield duplicate parameters for parent modules w/ parameters so disabling it
for param in mod.parameters(recurse=False):
if param.requires_grad == requires_grad:
yield param
@staticmethod
def make_trainable(modules: Union[Module, Iterable[Union[Module, Iterable]]]) -> None:
"""
Unfreezes the parameters of the provided modules
Args:
modules: A given module or an iterable of modules
"""
modules = BaseFinetuning.flatten_modules(modules)
for module in modules:
# recursion could yield duplicate parameters for parent modules w/ parameters so disabling it
for param in module.parameters(recurse=False):
param.requires_grad = True
@staticmethod
def freeze(modules: Union[Module, Iterable[Union[Module, Iterable]]], train_bn: bool = True) -> None:
"""
Freezes the parameters of the provided modules
Args:
modules: A given module or an iterable of modules
train_bn: If True, leave the BatchNorm layers in training mode
Returns:
None
"""
modules = BaseFinetuning.flatten_modules(modules)
for mod in modules:
if isinstance(mod, _BatchNorm) and train_bn:
BaseFinetuning.make_trainable(mod)
else:
# recursion could yield duplicate parameters for parent modules w/ parameters so disabling it
for param in mod.parameters(recurse=False):
param.requires_grad = False
@staticmethod
def filter_on_optimizer(optimizer: Optimizer, params: Iterable) -> List:
"""
This function is used to exclude any parameter which already exists in
this optimizer
Args:
optimizer: Optimizer used for parameter exclusion
params: Iterable of parameters used to check against the provided optimizer
Returns:
List of parameters not contained in this optimizer param groups
"""
out_params = []
removed_params = []
for param in params:
if not any(torch.equal(p, param) for group in optimizer.param_groups for p in group["params"]):
out_params.append(param)
else:
removed_params.append(param)
if removed_params:
rank_zero_warn(
"The provided params to be freezed already exist within another group of this optimizer."
" Those parameters will be skipped.\n"
"HINT: Did you init your optimizer in `configure_optimizer` as such:\n"
f" {type(optimizer)}(filter(lambda p: p.requires_grad, self.parameters()), ...) ", UserWarning
)
return out_params
@staticmethod
def unfreeze_and_add_param_group(
modules: Union[Module, Iterable[Union[Module, Iterable]]],
optimizer: Optimizer,
lr: Optional[float] = None,
initial_denom_lr: float = 10.,
train_bn: bool = True,
) -> None:
"""
Unfreezes a module and adds its parameters to an optimizer.
Args:
modules: A module or iterable of modules to unfreeze.
Their parameters will be added to an optimizer as a new param group.
optimizer: The provided optimizer will receive new parameters and will add them to
`add_param_group`
lr: Learning rate for the new param group.
initial_denom_lr: If no lr is provided, the learning rate from the first param group will be used
and divided by initial_denom_lr.
train_bn: Whether to train the BatchNormalization layers.
Returns:
None
"""
BaseFinetuning.make_trainable(modules)
params_lr = optimizer.param_groups[0]['lr'] if lr is None else float(lr)
denom_lr = initial_denom_lr if lr is None else 1.
params = BaseFinetuning.filter_params(modules, train_bn=train_bn, requires_grad=True)
params = BaseFinetuning.filter_on_optimizer(optimizer, params)
if params:
optimizer.add_param_group({
'params': params,
'lr': params_lr / denom_lr,
})
def on_before_accelerator_backend_setup(self, trainer, pl_module):
self.freeze_before_training(pl_module)
@staticmethod
def __apply_mapping_to_param_groups(param_groups: List[Dict[str, Any]], mapping: dict) -> List[Dict[str, Any]]:
output = []
for g in param_groups:
# skip params to save memory
group_state = {k: v for k, v in g.items() if k != 'params'}
group_state['params'] = [mapping[p] for p in g['params']]
output.append(group_state)
return output
def _store(
self,
pl_module: 'pl.LightningModule',
opt_idx: int,
num_param_groups: int,
current_param_groups: List[Dict[str, Any]],
) -> None:
mapping = {p: n for n, p in pl_module.named_parameters()}
if opt_idx not in self._internal_state:
self._internal_state[opt_idx] = self.__apply_mapping_to_param_groups(current_param_groups, mapping)
elif num_param_groups != len(current_param_groups):
# save new param_groups possibly created by the users.
self._internal_state[opt_idx].extend(
self.__apply_mapping_to_param_groups(current_param_groups[num_param_groups:], mapping)
)
def on_train_epoch_start(self, trainer, pl_module):
"""Called when the epoch begins."""
for opt_idx, optimizer in trainer.fit_loop.get_active_optimizers():
num_param_groups = len(optimizer.param_groups)
self.finetune_function(pl_module, trainer.current_epoch, optimizer, opt_idx)
current_param_groups = optimizer.param_groups
self._store(pl_module, opt_idx, num_param_groups, current_param_groups)
def finetune_function(self, pl_module: 'pl.LightningModule', epoch: int, optimizer: Optimizer, opt_idx: int):
"""
Override to add your unfreeze logic
"""
raise NotImplementedError
def freeze_before_training(self, pl_module: 'pl.LightningModule'):
"""
Override to add your freeze logic
"""
raise NotImplementedError
class BackboneFinetuning(BaseFinetuning):
r"""
Finetune a backbone model based on a learning rate user-defined scheduling.
When the backbone learning rate reaches the current model learning rate
and ``should_align`` is set to True, it will align with it for the rest of the training.
Args:
unfreeze_backbone_at_epoch: Epoch at which the backbone will be unfrozen.
lambda_func: Scheduling function for increasing backbone learning rate.
backbone_initial_ratio_lr:
Used to scale down the backbone learning rate compared to rest of model
backbone_initial_lr: Optional, initial learning rate for the backbone.
By default, we will use current_learning_rate / backbone_initial_ratio_lr
should_align: Whether to align with the current learning rate when the backbone learning
rate reaches it.
initial_denom_lr: When unfreezing the backbone, the initial learning rate will be
current_learning_rate / initial_denom_lr.
train_bn: Whether to make Batch Normalization trainable.
verbose: Display current learning rate for model and backbone
round: Precision for displaying learning rate
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import BackboneFinetuning
>>> multiplicative = lambda epoch: 1.5
>>> backbone_finetuning = BackboneFinetuning(200, multiplicative)
>>> trainer = Trainer(callbacks=[backbone_finetuning])
"""
def __init__(
self,
unfreeze_backbone_at_epoch: int = 10,
lambda_func: Callable = multiplicative,
backbone_initial_ratio_lr: float = 10e-2,
backbone_initial_lr: Optional[float] = None,
should_align: bool = True,
initial_denom_lr: float = 10.,
train_bn: bool = True,
verbose: bool = False,
round: int = 12,
):
super().__init__()
self.unfreeze_backbone_at_epoch = unfreeze_backbone_at_epoch
self.backbone_initial_lr = backbone_initial_lr
self.lambda_func = lambda_func
self.backbone_initial_ratio_lr = backbone_initial_ratio_lr
self.should_align = should_align
self.initial_denom_lr = initial_denom_lr
self.train_bn = train_bn
self.round = round
self.verbose = verbose
def on_fit_start(self, trainer, pl_module):
"""
Raises:
MisconfigurationException:
If LightningModule has no nn.Module `backbone` attribute.
"""
if hasattr(pl_module, "backbone") and isinstance(pl_module.backbone, Module):
return
raise MisconfigurationException("The LightningModule should have a nn.Module `backbone` attribute")
def freeze_before_training(self, pl_module: 'pl.LightningModule'):
self.freeze(pl_module.backbone)
def finetune_function(self, pl_module: 'pl.LightningModule', epoch: int, optimizer: Optimizer, opt_idx: int):
"""Called when the epoch begins."""
if epoch == self.unfreeze_backbone_at_epoch:
current_lr = optimizer.param_groups[0]['lr']
initial_backbone_lr = self.backbone_initial_lr if self.backbone_initial_lr is not None \
else current_lr * self.backbone_initial_ratio_lr
self.previous_backbone_lr = initial_backbone_lr
self.unfreeze_and_add_param_group(
pl_module.backbone,
optimizer,
initial_backbone_lr,
train_bn=self.train_bn,
initial_denom_lr=self.initial_denom_lr
)
if self.verbose:
log.info(
f"Current lr: {round(current_lr, self.round)}, "
f"Backbone lr: {round(initial_backbone_lr, self.round)}"
)
elif epoch > self.unfreeze_backbone_at_epoch:
current_lr = optimizer.param_groups[0]['lr']
next_current_backbone_lr = self.lambda_func(epoch + 1) * self.previous_backbone_lr
next_current_backbone_lr = current_lr if (self.should_align and next_current_backbone_lr > current_lr) \
else next_current_backbone_lr
optimizer.param_groups[-1]["lr"] = next_current_backbone_lr
self.previous_backbone_lr = next_current_backbone_lr
if self.verbose:
log.info(
f"Current lr: {round(current_lr, self.round)}, "
f"Backbone lr: {round(next_current_backbone_lr, self.round)}"
)
|
py | b40bbf896c3248a43461924f5295c939377a9c67 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Genre, Subgenre, User
engine = create_engine('sqlite:///musicology.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# 1. Rock Genre and Subgenres
genre1 = Genre(user_id=1, name="Rock")
session.add(genre1)
session.commit()
subgenre1 = Subgenre(user_id=1, name="Hard Rock",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre1)
session.add(subgenre1)
session.commit()
subgenre2 = Subgenre(user_id=1, name="Rock & Roll",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre1)
session.add(subgenre2)
session.commit()
subgenre3 = Subgenre(user_id=1, name="Afro Punk",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre1)
session.add(subgenre3)
session.commit()
subgenre4 = Subgenre(user_id=1, name="Soft Rock",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre1)
session.add(subgenre4)
session.commit()
# 2. Classical Genre and Subgenres
genre2 = Genre(user_id=1, name="Classical Music")
session.add(genre2)
session.commit()
subgenre1 = Subgenre(user_id=1, name="Orchestral",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre2)
session.add(subgenre1)
session.commit()
subgenre2 = Subgenre(user_id=1, name="Opera",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre2)
session.add(subgenre2)
session.commit()
subgenre3 = Subgenre(user_id=1, name="Contemporary Classical",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre2)
session.add(subgenre3)
session.commit()
subgenre4 = Subgenre(user_id=1, name="Minimalism",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre2)
session.add(subgenre4)
session.commit()
# 3. Hip-Hop Genre and Subgenres
genre3 = Genre(user_id=1, name="Hip-Hop")
session.add(genre3)
session.commit()
subgenre1 = Subgenre(user_id=1, name="Alternative Rap",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre3)
session.add(subgenre1)
session.commit()
subgenre2 = Subgenre(user_id=1, name="Old School",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre3)
session.add(subgenre2)
session.commit()
subgenre3 = Subgenre(user_id=1, name="Underground Rap",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre3)
session.add(subgenre3)
session.commit()
subgenre4 = Subgenre(user_id=1, name="Turntablism",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre3)
session.add(subgenre4)
session.commit()
subgenre5 = Subgenre(user_id=1, name="Experimental",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre3)
session.add(subgenre5)
session.commit()
# 4. World Music Genre and Subgenres
genre4 = Genre(user_id=1, name="World Music")
session.add(genre4)
session.commit()
subgenre1 = Subgenre(user_id=1, name="Afro-Beat",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre4)
session.add(subgenre1)
session.commit()
subgenre2 = Subgenre(user_id=1, name="Calypso",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre4)
session.add(subgenre2)
session.commit()
subgenre3 = Subgenre(user_id=1, name="K-Pop",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre4)
session.add(subgenre3)
session.commit()
subgenre4 = Subgenre(user_id=1, name="Mbalax",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre4)
session.add(subgenre4)
session.commit()
subgenre5 = Subgenre(user_id=1, name="Zouk",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre4)
session.add(subgenre5)
session.commit()
# 5. Reggae Genre and Subgenres
genre5 = Genre(user_id=1, name="Reggae")
session.add(genre5)
session.commit()
subgenre1 = Subgenre(user_id=1, name="Dancehall",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre5)
session.add(subgenre1)
session.commit()
subgenre2 = Subgenre(user_id=1, name="Roots Reggae",
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
genre=genre5)
session.add(subgenre2)
session.commit()
print "New music genres & subgenres added!"
|
py | b40bc177a4c8ea45eb33aab297f691415567149b | import torch
from torch.nn import Module, Linear
from torch.nn.functional import relu
class Model(Module):
"""An implementation of torch.nn.Module.
Args:
Module (Class): generic pytorch model class.
"""
def __init__(self, in_shape: torch.Size, num_classes: int):
"""Initialize the model
Args:
in_shape (torch.Size): the shape of input.
num_classes (int): number of output classes.
"""
super(Model, self).__init__()
# Parameters
self.in_features = int(torch.prod(torch.tensor(in_shape[1:])))  # nn.Linear expects a plain int
self.num_classes = num_classes
# Define layers
self.fc0 = Linear(self.in_features, 32)
self.fc1 = Linear(32, 32)
self.fc2 = Linear(32, self.num_classes)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Feed data through the model.
Args:
x (torch.Tensor): data.
Returns:
torch.Tensor: label.
"""
x = relu(self.fc0(x))
x = relu(self.fc1(x))
x = self.fc2(x)
return x
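# --- Usage sketch (illustrative; not part of the original module) ---
# Builds the model for flattened 28x28 inputs; batch size and class count are
# arbitrary example values.
if __name__ == "__main__":
    batch = torch.randn(16, 28 * 28)
    model = Model(in_shape=batch.shape, num_classes=10)
    print(model(batch).shape)  # expected: torch.Size([16, 10])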
|
py | b40bc24a94d54c71667068480405f7d8658486b5 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from reco_utils.recommender.newsrec.models.base_model import BaseModel
from reco_utils.recommender.newsrec.models.layers import AttLayer2
__all__ = ["NAMLModel"]
class NAMLModel(BaseModel):
"""NAML model(Neural News Recommendation with Attentive Multi-View Learning)
Chuhan Wu, Fangzhao Wu, Mingxiao An, Jianqiang Huang, Yongfeng Huang and Xing Xie,
Neural News Recommendation with Attentive Multi-View Learning, IJCAI 2019
Attributes:
word2vec_embedding (numpy.array): Pretrained word embedding matrix.
hparam (obj): Global hyper-parameters.
"""
def __init__(self, hparams, iterator_creator, seed=None):
"""Initialization steps for NAML.
Compared with the BaseModel, NAML needs word embeddings.
After creating the word embedding matrix, BaseModel's __init__ method will be called.
Args:
hparams (obj): Global hyper-parameters. Some key settings such as filter_num are there.
iterator_creator (obj): NAML data loader class for train, test and validation data.
"""
self.word2vec_embedding = self._init_embedding(hparams.wordEmb_file)
self.hparam = hparams
super().__init__(hparams, iterator_creator, seed=seed)
def _get_input_label_from_iter(self, batch_data):
input_feat = [
batch_data["impression_index_batch"],
batch_data["user_index_batch"],
batch_data["clicked_title_batch"],
batch_data["clicked_body_batch"],
batch_data["clicked_vert_batch"],
batch_data["clicked_subvert_batch"],
batch_data["candidate_title_batch"],
batch_data["candidate_body_batch"],
batch_data["candidate_vert_batch"],
batch_data["candidate_subvert_batch"]
]
input_label = batch_data["labels"]
return input_feat, input_label
def _init_embedding(self, file_path):
"""Load pre-trained embeddings as a constant tensor.
Args:
file_path (str): the pre-trained embeddings filename.
Returns:
np.array: A constant numpy array.
"""
return np.load(file_path).astype(np.float32)
def _build_graph(self):
"""Build NAML model and scorer.
Returns:
obj: a model used to train.
obj: a model used to evaluate and inference.
"""
model, scorer = self._build_naml()
return model, scorer
def _build_userencoder(self, newsencoder):
"""The main function to create user encoder of NAML.
Args:
newsencoder(obj): the news encoder of NAML.
Return:
obj: the user encoder of NAML.
"""
hparams = self.hparams
his_input_title_body_verts = keras.Input(
shape=(hparams.his_size, hparams.title_size + hparams.body_size + 2),
dtype="int32",
)
click_news_presents = layers.TimeDistributed(newsencoder)(
his_input_title_body_verts
)
user_present = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(click_news_presents)
model = keras.Model(
his_input_title_body_verts, user_present, name="user_encoder"
)
return model
def _build_newsencoder(self, embedding_layer):
"""The main function to create news encoder of NAML.
news encoder is composed of a title encoder, body encoder, vert encoder and subvert encoder
Args:
embedding_layer(obj): a word embedding layer.
Return:
obj: the news encoder of NAML.
"""
hparams = self.hparams
input_title_body_verts = keras.Input(
shape=(hparams.title_size + hparams.body_size + 2,), dtype="int32"
)
sequences_input_title = layers.Lambda(lambda x: x[:, : hparams.title_size])(
input_title_body_verts
)
sequences_input_body = layers.Lambda(
lambda x: x[:, hparams.title_size : hparams.title_size + hparams.body_size]
)(input_title_body_verts)
input_vert = layers.Lambda(
lambda x: x[
:,
hparams.title_size
+ hparams.body_size : hparams.title_size
+ hparams.body_size
+ 1,
]
)(input_title_body_verts)
input_subvert = layers.Lambda(
lambda x: x[:, hparams.title_size + hparams.body_size + 1 :]
)(input_title_body_verts)
title_repr = self._build_titleencoder(embedding_layer)(sequences_input_title)
body_repr = self._build_bodyencoder(embedding_layer)(sequences_input_body)
vert_repr = self._build_vertencoder()(input_vert)
subvert_repr = self._build_subvertencoder()(input_subvert)
concate_repr = layers.Concatenate(axis=-2)(
[title_repr, body_repr, vert_repr, subvert_repr]
)
news_repr = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(concate_repr)
model = keras.Model(input_title_body_verts, news_repr, name="news_encoder")
return model
def _build_titleencoder(self, embedding_layer):
"""build title encoder of NAML news encoder.
Args:
embedding_layer(obj): a word embedding layer.
Return:
obj: the title encoder of NAML.
"""
hparams = self.hparams
sequences_input_title = keras.Input(shape=(hparams.title_size,), dtype="int32")
embedded_sequences_title = embedding_layer(sequences_input_title)
y = layers.Dropout(hparams.dropout)(embedded_sequences_title)
y = layers.Conv1D(
hparams.filter_num,
hparams.window_size,
activation=hparams.cnn_activation,
padding="same",
bias_initializer=keras.initializers.Zeros(),
kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed)
)(y)
y = layers.Dropout(hparams.dropout)(y)
pred_title = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y)
pred_title = layers.Reshape((1, hparams.filter_num))(pred_title)
model = keras.Model(sequences_input_title, pred_title, name="title_encoder")
return model
def _build_bodyencoder(self, embedding_layer):
"""build body encoder of NAML news encoder.
Args:
embedding_layer(obj): a word embedding layer.
Return:
obj: the body encoder of NAML.
"""
hparams = self.hparams
sequences_input_body = keras.Input(shape=(hparams.body_size,), dtype="int32")
embedded_sequences_body = embedding_layer(sequences_input_body)
y = layers.Dropout(hparams.dropout)(embedded_sequences_body)
y = layers.Conv1D(
hparams.filter_num,
hparams.window_size,
activation=hparams.cnn_activation,
padding="same",
bias_initializer=keras.initializers.Zeros(),
kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed)
)(y)
y = layers.Dropout(hparams.dropout)(y)
pred_body = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y)
pred_body = layers.Reshape((1, hparams.filter_num))(pred_body)
model = keras.Model(sequences_input_body, pred_body, name="body_encoder")
return model
def _build_vertencoder(self):
"""build vert encoder of NAML news encoder.
Return:
obj: the vert encoder of NAML.
"""
hparams = self.hparams
input_vert = keras.Input(shape=(1,), dtype="int32")
vert_embedding = layers.Embedding(
hparams.vert_num, hparams.vert_emb_dim, trainable=True
)
vert_emb = vert_embedding(input_vert)
pred_vert = layers.Dense(
hparams.filter_num,
activation=hparams.dense_activation,
bias_initializer=keras.initializers.Zeros(),
kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed)
)(vert_emb)
pred_vert = layers.Reshape((1, hparams.filter_num))(pred_vert)
model = keras.Model(input_vert, pred_vert, name="vert_encoder")
return model
def _build_subvertencoder(self):
"""build subvert encoder of NAML news encoder.
Return:
obj: the subvert encoder of NAML.
"""
hparams = self.hparams
input_subvert = keras.Input(shape=(1,), dtype="int32")
subvert_embedding = layers.Embedding(
hparams.subvert_num, hparams.subvert_emb_dim, trainable=True
)
subvert_emb = subvert_embedding(input_subvert)
pred_subvert = layers.Dense(
hparams.filter_num,
activation=hparams.dense_activation,
bias_initializer=keras.initializers.Zeros(),
kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed)
)(subvert_emb)
pred_subvert = layers.Reshape((1, hparams.filter_num))(pred_subvert)
model = keras.Model(input_subvert, pred_subvert, name="subvert_encoder")
return model
def _build_naml(self):
"""The main function to create NAML's logic. The core of NAML
is a user encoder and a news encoder.
Returns:
obj: a model used to train.
obj: a model used to evaluate and predict.
"""
hparams = self.hparams
his_input_title = keras.Input(
shape=(hparams.his_size, hparams.title_size), dtype="int32"
)
his_input_body = keras.Input(
shape=(hparams.his_size, hparams.body_size), dtype="int32"
)
his_input_vert = keras.Input(shape=(hparams.his_size, 1), dtype="int32")
his_input_subvert = keras.Input(shape=(hparams.his_size, 1), dtype="int32")
pred_input_title = keras.Input(
shape=(hparams.npratio + 1, hparams.title_size), dtype="int32"
)
pred_input_body = keras.Input(
shape=(hparams.npratio + 1, hparams.body_size), dtype="int32"
)
pred_input_vert = keras.Input(shape=(hparams.npratio + 1, 1), dtype="int32")
pred_input_subvert = keras.Input(shape=(hparams.npratio + 1, 1), dtype="int32")
pred_input_title_one = keras.Input(
shape=(1, hparams.title_size,), dtype="int32"
)
pred_input_body_one = keras.Input(shape=(1, hparams.body_size,), dtype="int32")
pred_input_vert_one = keras.Input(shape=(1, 1), dtype="int32")
pred_input_subvert_one = keras.Input(shape=(1, 1), dtype="int32")
his_title_body_verts = layers.Concatenate(axis=-1)(
[his_input_title, his_input_body, his_input_vert, his_input_subvert]
)
pred_title_body_verts = layers.Concatenate(axis=-1)(
[pred_input_title, pred_input_body, pred_input_vert, pred_input_subvert]
)
pred_title_body_verts_one = layers.Concatenate(axis=-1)(
[
pred_input_title_one,
pred_input_body_one,
pred_input_vert_one,
pred_input_subvert_one,
]
)
pred_title_body_verts_one = layers.Reshape((-1,))(pred_title_body_verts_one)
imp_indexes = keras.Input(shape=(1,), dtype="int32")
user_indexes = keras.Input(shape=(1,), dtype="int32")
embedding_layer = layers.Embedding(
hparams.word_size,
hparams.word_emb_dim,
weights=[self.word2vec_embedding],
trainable=True,
)
newsencoder = self._build_newsencoder(embedding_layer)
userencoder = self._build_userencoder(newsencoder)
user_present = userencoder(his_title_body_verts)
news_present = layers.TimeDistributed(newsencoder)(pred_title_body_verts)
news_present_one = newsencoder(pred_title_body_verts_one)
preds = layers.Dot(axes=-1)([news_present, user_present])
preds = layers.Activation(activation="softmax")(preds)
pred_one = layers.Dot(axes=-1)([news_present_one, user_present])
pred_one = layers.Activation(activation="sigmoid")(pred_one)
model = keras.Model(
[
imp_indexes,
user_indexes,
his_input_title,
his_input_body,
his_input_vert,
his_input_subvert,
pred_input_title,
pred_input_body,
pred_input_vert,
pred_input_subvert,
],
preds,
)
scorer = keras.Model(
[
imp_indexes,
user_indexes,
his_input_title,
his_input_body,
his_input_vert,
his_input_subvert,
pred_input_title_one,
pred_input_body_one,
pred_input_vert_one,
pred_input_subvert_one,
],
pred_one,
)
return model, scorer
|
py | b40bc2a20dd180e2c532ceefbbf7ed7e146020e2 | import os
from collections import Counter
import pickle
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_squared_error as mse
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
cv = CountVectorizer(strip_accents='ascii', stop_words='english')
vectorizer = TfidfVectorizer()
def tfidf_train(X_train, X_val, y_train, y_val):
model = DecisionTreeRegressor()
# Vectorize train/validation text; the fitted tfidf vectorizer is pickled inside _tokenize
X_train_transformed = _tokenize(X_train)
X_val_transformed = _transform_tfidf(X_val)
# Train, save and evaluate model
saved_path = './modelling/saved_models' + '/tfidf_coronatweetmodel.h5'
saved_model = model.fit(X_train_transformed, y_train)
pickle.dump(saved_model, open(saved_path, 'wb'))
print('model saved')
return saved_model, X_train_transformed, X_val_transformed
def _tokenize(train_df):
corpus = train_df.values
vec_cv = vectorizer.fit_transform(corpus)
saved_path = './modelling/saved_models' + '/tfidf.pickle'
pickle.dump(vectorizer, open(saved_path, "wb"))
print('tfidf saved')
return vec_cv
def _transform_tfidf(df):
corpus = df.values
tfidt = vectorizer.transform(corpus)
return tfidt
def tfidf_predict(model,X_test, y_test):
# model = load_model(model)
# vectorizer
tf1 = pickle.load(open("/Users/sukyee/Desktop/team5/tfidf.pickle", 'rb'))
X_test_transformed = tf1.transform(X_test.values)
print(X_test_transformed.shape)
test_predict = model.predict(X_test_transformed)
test_score = np.sqrt(mse(y_test, test_predict))
print("RMSE on test set:", test_score)
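# --- Usage sketch (illustrative; not part of the original module) ---
# A tiny end-to-end run on made-up tweets; assumes the ./modelling/saved_models
# directory exists so the pickles above can be written.
if __name__ == "__main__":
    X_train = pd.Series(["stay home stay safe", "wash your hands often", "corona updates today"])
    y_train = pd.Series([0.2, 0.5, 0.9])
    X_val = pd.Series(["stay safe at home"])
    y_val = pd.Series([0.3])
    model, X_tr, X_v = tfidf_train(X_train, X_val, y_train, y_val)
    print(model.predict(X_v))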
|
py | b40bc336e471f63c135ffbf2dc1a3ed3003cda8c |
def num_to_micro_amount(num, precision=2):
"""
Converts a number into micro-amounts (multiplied by 1M), rounded to
specified precision. Useful for more easily working with currency,
and also for communicating with DFP API.
Args:
num (float or int)
precision (int)
Returns:
an integer: int(num * 1,000,000), rounded to the nearest
10^(6-`precision`)
"""
rounding = -6 + precision
return int(round(num * (10 ** 6), rounding))
def micro_amount_to_num(micro_amount):
"""
Converts micro-amount into its number.
Args:
micro_amount (int)
Returns:
a float: micro_amount divided by 1M
"""
return float(micro_amount) / (10 ** 6)
def num_to_str(num, precision=2):
"""
Converts num into a string with `precision` number
of decimal places.
Args:
num (float or int)
precision (int)
Returns:
a string
"""
return '%.{0}f'.format(str(precision)) % num
def adjust_price_bucket_by_price_multiplier(price_bucket, price_multiplier):
"""
Args:
price_bucket (object): the price bucket configuration
price_multiplier (int): the price_multiplier to adjust price bucket
:return: updated price_bucket
"""
new_price_buckets = {
'precision': price_bucket['precision'],
'min': price_bucket['min'] * price_multiplier,
'max': price_bucket['max'] * price_multiplier,
'increment': price_bucket['increment'] * price_multiplier,
}
return new_price_buckets
def get_prices_array_from_price_bucket_list(price_bucket_list):
"""
Creates an array of prices from a list of price buckets.
Args:
price_bucket_list (list): a list of price bucket configuration
Returns:
an array of integers (list):
"""
prices = []
for price_bucket in price_bucket_list:
prices.extend(get_prices_array(price_bucket))
return prices
def get_prices_array(price_bucket):
"""
Creates an array of price bucket cutoffs in micro-amounts
from a price_bucket configuration.
Args:
price_bucket (object): the price bucket configuration
Returns:
an array of integers: every price bucket cutoff from:
int(round(price_bucket['min'] * 10**6, precision)) to
int(round(price_bucket['max'] * 10**6, precision))
"""
# Arbitrary max CPM to prevent large user errors.
end_cpm = price_bucket['max']
start_cpm = price_bucket['min'] if price_bucket['min'] >=0 else 0.00
increment = price_bucket['increment']
precision = price_bucket['precision']
start_cpm_micro_amount = num_to_micro_amount(start_cpm, precision)
end_cpm_micro_amount = num_to_micro_amount(end_cpm, precision)
increment_micro_amount = num_to_micro_amount(increment, precision)
current_cpm_micro_amount = start_cpm_micro_amount
prices = []
while current_cpm_micro_amount <= end_cpm_micro_amount:
prices.append(current_cpm_micro_amount)
current_cpm_micro_amount += increment_micro_amount
return prices
def get_prices_summary_string(prices_array, precision=2):
"""
Returns a string preview of the prices array.
Args:
prices_array (array): the list of prices in micro-amounts
Returns:
a string: a preview of the first few and last few
items in the array in regular amounts (converted from
micro-amounts).
"""
if (len(prices_array) < 6):
summary = ', '.join(
[num_to_str(micro_amount_to_num(price), precision)
for price in prices_array])
else:
summary = '{0}, {1}, {2}, ... {3}, {4}, {5}'.format(
num_to_str(micro_amount_to_num(prices_array[0]), precision),
num_to_str(micro_amount_to_num(prices_array[1]), precision),
num_to_str(micro_amount_to_num(prices_array[2]), precision),
num_to_str(micro_amount_to_num(prices_array[-3]), precision),
num_to_str(micro_amount_to_num(prices_array[-2]), precision),
num_to_str(micro_amount_to_num(prices_array[-1]), precision),
)
return summary
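# --- Worked example (illustrative; not part of the original module) ---
# A hypothetical price bucket with 0.10 CPM steps from 0.10 to 0.50.
if __name__ == '__main__':
    bucket = {'precision': 2, 'min': 0.10, 'max': 0.50, 'increment': 0.10}
    prices = get_prices_array(bucket)
    print(prices)                             # [100000, 200000, 300000, 400000, 500000]
    print(get_prices_summary_string(prices))  # 0.10, 0.20, 0.30, 0.40, 0.50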
|
py | b40bc33e188e84cd5e7d22c9c9871d4365267f34 | '''
Created on Jul 30, 2011
@author: michel
'''
from FeatureServer.Service.Request import Request
import vectorformats.Formats.OV2
class OV2(Request):
def encode(self, results):
ov2 = vectorformats.Formats.OV2.OV2(layername=self.datasources[0])
output = ov2.encode(results)
headers = {
'Accept': '*/*',
'Content-Disposition' : 'attachment; filename=poidownload.ov2',
'Content-Transfer-Encoding' : 'binary'
}
return ("application/octet-stream", output, headers, '')
|
py | b40bc362411c50832dc6d73c7e053c86b3c971dd | from flask import Flask, jsonify
from csv import reader
from json import dumps
import typing as t
import os
from datetime import datetime, timedelta
from subprocess import check_output, CalledProcessError, run
import hashlib
from sys import argv
DEBUG = bool(os.environ.get('DEBUG'))
DIRECTORY = '/tmp'
CACHE_TIME: timedelta = timedelta(seconds=5)
ANCIENT_TIME = datetime(1993, 2, 13, 0, 0, 0, 0)
APP_DIR = '/app' if not DEBUG else '.'
def dprint(*args, **kwargs):
if DEBUG:
print(*args, **kwargs)
def absolute(filename_: str) -> str:
return os.path.join(DIRECTORY, filename_)
def checksum(url: str) -> str:
return hashlib.sha256(url.encode('utf-8')).hexdigest()
def file_data(url: str) -> (str, str):
return str(datetime.now().timestamp()), checksum(url)
def retrieve_data(filename_: str) -> t.Optional[t.Tuple[str, str]]:
split = filename_.split('_')
return tuple(split) if len(split) == 2 else None
def filename(url: str) -> (str, str):
return '_'.join(file_data(url))
def matches_hash(path: str, hash_: str) -> bool:
data = retrieve_data(path)
return data[1] == hash_ if data else False
def already_saved(hash_: str) -> t.Generator[str, None, None]:
yield from (f for f in os.listdir(DIRECTORY) if matches_hash(absolute(f), hash_))
def valid_cache(url: str) -> t.Optional[str]:
time, hash_ = file_data(url)
try:
latest = next((f for f in sorted(
already_saved(hash_),
key=lambda f: float(retrieve_data(f)[0]) if retrieve_data(f) else ANCIENT_TIME.timestamp(), reverse=True,
)))
except StopIteration:
return None
delta = datetime.fromtimestamp(float(time)) - datetime.fromtimestamp(float(retrieve_data(latest)[0]))
return latest if delta <= CACHE_TIME else None
def get_file_handle(url: str) -> str:
cache = valid_cache(url)
return absolute(cache) if cache else create_new_handle(url)
def create_new_handle(url: str) -> t.Optional[str]:
dprint(f'DEBUG creating new file for {url}')
filename_ = filename(url)
path = absolute(filename_)
command = [
os.path.join(APP_DIR, 'goodls_linux_amd64'),
'-u',
url,
'-e',
'csv',
'--overwrite',
'--directory',
DIRECTORY,
'--filename',
filename_,
]
command = ' '.join(command)
dprint('[DEBUG]', command)
try:
dprint('DEBUG: ', check_output(command, shell=True))
except CalledProcessError:
return None
return path
def get_gdrive_contents(url: str) -> t.List[t.List[str]]:
path = get_file_handle(url)
if not path:
return [[]]
with open(path, 'r') as f:
r = reader(f)
return list(map(list, r))
app = Flask(__name__)
@app.after_request # https://stackoverflow.com/a/45818284/9014011
def after_request(response):
header = response.headers
header['Access-Control-Allow-Origin'] = '*'
return response
@app.route('/document/<path:url>')
def file(url: str):
url = url.strip()
if url.endswith('edit'):
url = url + '?usp\\=sharing'
dprint('[DEBUG]', url)
return jsonify(get_gdrive_contents(url or ''))
if __name__ == '__main__':
url_ = argv[2]
print(get_gdrive_contents(url_))
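# --- Usage note (illustrative; not part of the original module) ---
# Serve the API with Flask's CLI and fetch a shared Google Sheet as JSON, e.g.:
#   FLASK_APP=<this module> flask run
#   curl "http://127.0.0.1:5000/document/<google-sheets-share-url>"
# Running this file directly instead downloads and prints the sheet passed as argv[2].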
|
py | b40bc395328b8a380b9c08557d712ee9df01b691 | import os,sys,math,numpy as np, matplotlib.pyplot as plt
def getsplits_2cls_plusminus(labels ,numcv):
import numpy as np
indexlist=[ [] for i in range(numcv) ]
#4 sets
if np.sum(labels==0)>0:
print('??? np.sum(labels==0)>0')
exit()
for lbtype in [-1,1]:
indices=[i for i in range(len(labels)) if (labels[i]==lbtype) ]
if( len(indices) >0):
np.random.shuffle(indices)
num=int(math.floor(len(indices)/float(numcv)))
if num==0:
print('ERR: number of data in slice is 0, exiting!',len(indices),numcv,lbtype)
exit()
for cv in range(numcv):
for k in range(cv*num,(cv+1)*num):
indexlist[cv].append(indices[k])
for k in range(numcv*num,len(indices)):
rnd=np.random.randint(0,numcv)
indexlist[rnd].append(indices[k])
return indexlist
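# --- Demo sketch (illustrative; not part of the original module) ---
# A small sanity check of the +/-1 splitter on random labels; call it manually.
def _demo_getsplits_2cls_plusminus(numdata=50, numcv=5):
    labels = np.random.choice([-1, 1], size=numdata)
    folds = getsplits_2cls_plusminus(labels, numcv)
    for cv, fold in enumerate(folds):
        print('cv', cv, 'size', len(fold), 'positives', int(np.sum(labels[fold] == 1)))
    return folds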
def getsplits_realvalued(obslb,numcv):
bsizemult=2
inds=np.argsort(obslb)
indspercv=[ [] for _ in range(numcv) ]
for i in inds:
print(obslb[i])
numbuckets=int(math.floor(len(obslb)/(bsizemult*numcv)))
rem= len(obslb)-numbuckets*(bsizemult*numcv)
print(numbuckets,rem)
for bind in range(numbuckets):
bstart=bind*(bsizemult*numcv)
bend=(bind+1)*(bsizemult*numcv)
if bind==numbuckets-1 :
bend+=int(rem)
np.random.shuffle(inds[bstart:bend])
#print(bstart,bend)
for cv in range(numcv):
indspercv[cv].extend( inds[bstart+cv*bsizemult:bstart+(cv+1)*bsizemult] )
if bind==numbuckets-1 :
cv=np.random.choice(np.asarray([i for i in range(numcv)],dtype=np.int32), 1)
cv=cv[0]
#print(type(cv),type(bend),bstart+numcv*bsizemult,bend)
indspercv[cv].extend(inds[bstart+numcv*bsizemult:bend] )
return indspercv
def getsplits_ncls_categoricalapprox(labels ,numcl,numcv):
#labels.shape = (numdata,numcl)
if labels.shape[1]!=numcl:
print('wrong number of classes')
exit()
counts = np.zeros(numcl)
for cl in range(numcl):
counts[cl]=np.sum(labels[:,cl]>0)
print(counts)
if np.sum(counts)<labels.shape[0]:
print('np.sum(counts)<labels.shape[0]', np.sum(counts),labels.shape[0])
exit()
if np.sum(counts)>labels.shape[0]:
#iterative resolution of multiple labels
#partial overlap
print('partial overlap')
pseudolabels = labels
for si in range (labels.shape[0]):
if np.sum( labels[si,:]>0) >1:
#have overlap, suppress for splitting inversely proportionally
valinds = [ cl for cl in range(numcl) if labels[si,cl]>0 ]
# inversely proportional, preserve smallest class
p = 1.0/counts[valinds] #np.array([ 1.0/counts[cl] for cl in valinds ])
p = p / np.sum(p)
chosencl= np.random.choice(valinds, size=1, replace=False, p=p)
for cl in range(numcl):
if cl !=chosencl:
pseudolabels[si, cl] = 0
if np.sum( pseudolabels[si,:]>0) !=1:
print( ' np.sum( pseudolabels[si,:]>0) !=1:', pseudolabels[si,:] )
exit()
else:
print('no overlap')
pseudolabels = labels
posinds=[ [] for cl in range(numcl) ]
for cl in range(numcl):
posinds[cl]=[i for i in range(labels.shape[0]) if (pseudolabels[i,cl]>0) ]
print(len(posinds[cl]))
#exit()
indexlist=[ [] for i in range(numcv) ]
for cl in range(numcl):
indices=posinds[cl]
np.random.shuffle(indices)
num=int(math.floor(len(indices)/float(numcv)))
if num==0:
print('ERR: number of data in slice is 0, exiting!',len(indices),numcv,cl)
exit()
for cv in range(numcv):
for k in range(cv*num,(cv+1)*num):
indexlist[cv].append(indices[k])
for k in range(numcv*num,len(indices)):
rnd=np.random.randint(0,numcv)
indexlist[rnd].append(indices[k])
#...
globalp = counts/np.sum(counts)
print('global', globalp )
for cv in range(numcv):
splitcounts = np.zeros(numcl)
for cl in range(numcl):
splitcounts[cl] = np.sum( labels[indexlist[cv],cl]>0 )
print('cv',cv,'local:', splitcounts / np.sum( splitcounts ) )
#print('cv',cv,'diff:', globalp- splitcounts / np.sum( splitcounts ) )
return indexlist
def split():
np.random.seed(seed=3)
outpath = './icesplits_v2_09052021'
numcl = 9
numcv = 10
with open('test_withBoundaries_new_Julie.npy','rb') as f:
a = np.load(f,allow_pickle=True)
b = np.load(f,allow_pickle=True)
c = np.load(f,allow_pickle=True)
labels = np.zeros((c.shape[0],numcl))
for l in range(c.shape[0]):
labels[l,:]=c[l]
indexlist = getsplits_ncls_categoricalapprox(labels ,numcl,numcv)
if not os.path.isdir(outpath):
os.makedirs(outpath)
for cv in range(numcv):
fname= os.path.join(outpath,'split_cv'+str(cv)+'.txt')
with open(fname,'w+') as f:
for i,ind in enumerate(indexlist[cv]):
if i==0:
f.write('{:d}'.format(ind))
else:
f.write(' '+'{:d}'.format(ind))
f.write('\n')
if __name__=='__main__':
split()
|
py | b40bc3ddbfa6fae3b1d08a5a70458b6f322598b9 | #ETL using Spark
# Target store: MariaDB (see the JDBC write sketch near the end of the script)
# Optionally create an explicit schema using the commented StructType block below
import pyspark
from pyspark.sql import SparkSession
#from pyspark
from pyspark.sql import Row as Row
from pyspark.sql.types import *
from pyspark.sql import functions as f
import datetime as dt
#declaring schema for the dataframe
'''schema = StructType([StructField("no",IntegerType(),True),\
StructField("date",StringType(),True),\
StructField("avgprice",DecimalType(),True),\
StructField("totvolume",DecimalType(),True),\
StructField("typ4046",DecimalType(),True),\
StructField("typ4225",DecimalType(),True),\
StructField("typ4770",DecimalType(),True),\
StructField("tot_bags",DecimalType(),True),\
StructField("small_bags",DecimalType(),True),\
StructField("large_bags",DecimalType(),True),\
StructField("xl_bags",DecimalType(),True),\
StructField("type",StringType(),True),\
StructField("year",IntegerType(),True),\
StructField("region",IntegerType(),True)])
print(schema)'''
'''SparkContext for the application'''
sc = pyspark.SparkContext(master = "local[*]",appName="Pi")
rdd1 = sc.textFile("C:\\KaggleDatasets\\avocado.csv") #Any csv file with or without header I am using Avocado dataset available in Kaggle
first1= rdd1.first()
rddfilt = rdd1.filter(lambda l: l != first1) #removing the header line of csv file converted to rdd
rddsplit = rddfilt.map(lambda l: l.split(","))
rddrow = rddsplit.map( lambda l: Row(l[0],l[1],l[2],l[3],l[4],l[5],l[6],l[7],\
l[8],l[9],l[10],l[11],l[12],l[13]))
spark = SparkSession(sc)
df = spark.createDataFrame(rddrow) # without schema
df.printSchema()
df.show(10)
df.createOrReplaceTempView("avocado_dtls")
dfsql = spark.sql("select * from avocado_dtls limit 10") # selecting all columns
dfsql.show(10)
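# The header comments say the data should land in MariaDB, but no write step is
# implemented above. Minimal sketch of that step (hostname, database, table and
# credentials are placeholders, and it assumes the MariaDB Connector/J driver
# jar is on the Spark classpath); disabled by default so the existing script
# behaviour is unchanged.
write_to_mariadb = False
if write_to_mariadb:
    df.write.format("jdbc") \
        .option("url", "jdbc:mariadb://localhost:3306/testdb") \
        .option("driver", "org.mariadb.jdbc.Driver") \
        .option("dbtable", "avocado_dtls") \
        .option("user", "user") \
        .option("password", "password") \
        .mode("append") \
        .save()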
sc.stop()
|
py | b40bc4451d6839ef6d76e48a1bfedafaadc9f767 | from collections import OrderedDict
from sdk_release_tools import ops
from sdk_release_tools import rpm
from sdk_release_tools.aws import get_bucket
from sdk_release_tools.versions import parse_major_minor, parse_version
import json
import os
def load_schema(schema_name):
filepath = (schema_name if schema_name.endswith('.json') else
'{}.json'.format(schema_name))
with open(filepath) as schema_file:
return json.loads(schema_file.read())
def get_variables(schema, version):
"""
Get the variables defined in the schema and merge them with any version
number variables (e.g., "major", "minor", "patch", etc.).
"""
variables = schema.get('variables', {})
variables.update(version.__dict__)
variables.update(version=str(version))
return variables
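# Illustrative sketch (hypothetical schema values): for a schema with
# variables {"product": "sdk"} and version 1.2.3, get_variables returns
# roughly {"product": "sdk", "major": 1, "minor": 2, "patch": 3,
# "version": "1.2.3"} -- the exact keys besides "version" depend on the
# attributes of the version object produced by parse_version.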
def delete(realm, schema, version, dry_run=True, silent=False):
artifacts = schema.get('artifacts', {})
variables = get_variables(schema, version)
return ops.delete(artifacts, bucket=get_bucket(realm), variables=variables,
dry_run=dry_run, silent=silent)
def download(realm, schema, version, root, dry_run=True):
artifacts = schema.get('artifacts', {})
variables = get_variables(schema, version)
return ops.download(artifacts, root=root, bucket=get_bucket(realm),
variables=variables, dry_run=dry_run)
def pin(realm, schema, version, dry_run=False):
rules = schema.get('pin', {})
variables = get_variables(schema, version)
copy_on_pin = schema.get('copy_on_pin', False)
return ops.pin(rules, bucket=get_bucket(realm), variables=variables,
dry_run=dry_run, copy_on_pin=copy_on_pin)
def pin_latest(realm, schema, version, dry_run=False):
rules = schema.get('latest', {})
variables = get_variables(schema, version)
copy_on_pin = schema.get('copy_on_pin', False)
return ops.pin(rules, bucket=get_bucket(realm), variables=variables,
dry_run=dry_run, copy_on_pin=copy_on_pin)
def unpin(realm, schema, version, dry_run=False):
rules = schema.get('pin', {})
variables = get_variables(schema, version)
return ops.unpin(rules, bucket=get_bucket(realm), variables=variables,
dry_run=dry_run)
def unpin_latest(realm, schema, version, dry_run=False):
rules = schema.get('latest', {})
variables = get_variables(schema, version)
return ops.unpin(rules, bucket=get_bucket(realm), variables=variables,
dry_run=dry_run)
def upload(realm, schema, version, root, dry_run=True):
artifacts = schema.get('artifacts', {})
variables = get_variables(schema, version)
if not os.path.isdir(root):
root = rpm.unpack(root)
return ops.upload(artifacts, root=root, bucket=get_bucket(realm),
variables=variables, dry_run=dry_run)
def get_cors(realm):
bucket = get_bucket(realm)
return bucket.get_cors()
def get_versions(realm, schema):
bucket = get_bucket(realm)
config = bucket.get_website_configuration_obj()
unordered_versions = []
versions_dir = schema.get('versions').format(**schema.get('variables', {}))
for key in bucket.list(versions_dir, '/'):
try:
version = parse_version(os.path.split(key.name.rstrip('/'))[1])
except:
continue
unordered_versions.append(version)
ordered_versions = OrderedDict()
for version in sorted(unordered_versions):
ordered_versions[str(version)] = None
major_minor_versions_dir = schema.get('major_minor_versions').format(
**schema.get('variables', {}))
ordered_major_minors = OrderedDict()
latest = None
# First, try to identify any versions pinned by S3 Key redirects.
for key in bucket.list(major_minor_versions_dir, '/'):
major_minor = None
try:
major_minor = parse_major_minor(
os.path.split(key.name.rstrip('/'))[1])
except:
if os.path.split(key.name.rstrip('/'))[1] != "latest":
continue
# This is a little bit of a hack: we are going to iterate through the
# prefixes until we find one that matches a key. Once we have the key,
# we need to check if it has a redirect to a version number.
version = None
for key in bucket.list(key.name, '/'):
if key.name.endswith('/'):
continue
key = bucket.get_key(key.name)
if not key:
continue
redirect = key.get_redirect()
if not redirect:
continue
if redirect.startswith('/' + versions_dir.lstrip('/')):
redirect = redirect[len('/' + versions_dir.lstrip('/')):]
try:
version = parse_version(redirect.split('/')[0])
except:
continue
break
if not version:
continue
if not major_minor:
latest = version
version_str = str(version)
if version_str in ordered_versions:
ordered_versions[version_str] = major_minor
ordered_major_minors[str(major_minor)] = version
# Then, try to identify any versions pinned by RoutingRules (legacy).
for rule in config.routing_rules:
key_prefix = rule.condition.key_prefix
if not key_prefix.startswith(major_minor_versions_dir):
continue
replace_key_prefix = rule.redirect.replace_key_prefix
try:
major_minor = parse_major_minor(
os.path.split(key_prefix.rstrip('/'))[1])
version = parse_version(
os.path.split(replace_key_prefix.rstrip('/'))[1])
except:
continue
version_str = str(version)
if version_str in ordered_versions:
ordered_versions[version_str] = major_minor
ordered_major_minors[str(major_minor)] = version
return (ordered_versions, ordered_major_minors, latest)
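# Illustrative note: get_versions returns a 3-tuple of
#   (OrderedDict mapping every released version string to the major.minor that
#    pins it, or None if unpinned;
#    OrderedDict mapping each pinned major.minor string to its version;
#    the version currently pinned as "latest", or None if not found).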
def version_exists(realm, schema, version):
ordered_versions, _, _ = get_versions(realm, schema)
return str(version) in ordered_versions
def get_pinned_by(realm, schema, version):
"""
Get the major/minor pair that pins a version.
"""
ordered_versions, _, _ = get_versions(realm, schema)
return ordered_versions.get(str(version))
|
py | b40bc48cc24891c391de2c3f03b2bc07b6324b54 | # -*- coding: utf-8 -*-
def average_temps(temps):
sum_of_temps = 0
for temp in temps:
sum_of_temps += float(temp)
return sum_of_temps / len(temps)
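# Example (illustrative): average_temps([21, 24]) returns (21.0 + 24.0) / 2 = 22.5.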
if __name__ == '__main__':
temps = [21, 24, 24, 22, 20, 23, 24]
average = average_temps(temps)
    print('The average temperature is: {}'.format(average)) |
py | b40bc48f47a0040cce73627c8f7c7ea86ef1f0c3 | # Copyright 2000, 2004 by Brad Chapman.
# Revisions copyright 2010-2013, 2015-2018 by Peter Cock.
# All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Code for dealing with sequence alignments.
One of the most important things in this module is the MultipleSeqAlignment
class, used in the Bio.AlignIO module.
"""
import warnings
from Bio import BiopythonDeprecationWarning
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord, _RestrictedDict
from Bio import Alphabet
from Bio.Align import _aligners
# Import errors may occur here if a compiled aligners.c file
# (_aligners.pyd or _aligners.so) is missing or if the user is
# importing from within the Biopython source tree, see PR #2007:
# https://github.com/biopython/biopython/pull/2007
class MultipleSeqAlignment:
"""Represents a classical multiple sequence alignment (MSA).
By this we mean a collection of sequences (usually shown as rows) which
are all the same length (usually with gap characters for insertions or
padding). The data can then be regarded as a matrix of letters, with well
defined columns.
You would typically create an MSA by loading an alignment file with the
AlignIO module:
>>> from Bio import AlignIO
>>> align = AlignIO.read("Clustalw/opuntia.aln", "clustal")
>>> print(align)
SingleLetterAlphabet() alignment with 7 rows and 156 columns
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273285|gb|AF191659.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273284|gb|AF191658.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273287|gb|AF191661.1|AF191
TATACATAAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273286|gb|AF191660.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273290|gb|AF191664.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273289|gb|AF191663.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273291|gb|AF191665.1|AF191
In some respects you can treat these objects as lists of SeqRecord objects,
each representing a row of the alignment. Iterating over an alignment gives
the SeqRecord object for each row:
>>> len(align)
7
>>> for record in align:
... print("%s %i" % (record.id, len(record)))
...
gi|6273285|gb|AF191659.1|AF191 156
gi|6273284|gb|AF191658.1|AF191 156
gi|6273287|gb|AF191661.1|AF191 156
gi|6273286|gb|AF191660.1|AF191 156
gi|6273290|gb|AF191664.1|AF191 156
gi|6273289|gb|AF191663.1|AF191 156
gi|6273291|gb|AF191665.1|AF191 156
You can also access individual rows as SeqRecord objects via their index:
>>> print(align[0].id)
gi|6273285|gb|AF191659.1|AF191
>>> print(align[-1].id)
gi|6273291|gb|AF191665.1|AF191
And extract columns as strings:
>>> print(align[:, 1])
AAAAAAA
Or, take just the first ten columns as a sub-alignment:
>>> print(align[:, :10])
SingleLetterAlphabet() alignment with 7 rows and 10 columns
TATACATTAA gi|6273285|gb|AF191659.1|AF191
TATACATTAA gi|6273284|gb|AF191658.1|AF191
TATACATTAA gi|6273287|gb|AF191661.1|AF191
TATACATAAA gi|6273286|gb|AF191660.1|AF191
TATACATTAA gi|6273290|gb|AF191664.1|AF191
TATACATTAA gi|6273289|gb|AF191663.1|AF191
TATACATTAA gi|6273291|gb|AF191665.1|AF191
Combining this alignment slicing with alignment addition allows you to
remove a section of the alignment. For example, taking just the first
and last ten columns:
>>> print(align[:, :10] + align[:, -10:])
SingleLetterAlphabet() alignment with 7 rows and 20 columns
TATACATTAAGTGTACCAGA gi|6273285|gb|AF191659.1|AF191
TATACATTAAGTGTACCAGA gi|6273284|gb|AF191658.1|AF191
TATACATTAAGTGTACCAGA gi|6273287|gb|AF191661.1|AF191
TATACATAAAGTGTACCAGA gi|6273286|gb|AF191660.1|AF191
TATACATTAAGTGTACCAGA gi|6273290|gb|AF191664.1|AF191
TATACATTAAGTATACCAGA gi|6273289|gb|AF191663.1|AF191
TATACATTAAGTGTACCAGA gi|6273291|gb|AF191665.1|AF191
Note - This object replaced the older Alignment object defined in module
Bio.Align.Generic but is not fully backwards compatible with it.
Note - This object does NOT attempt to model the kind of alignments used
in next generation sequencing with multiple sequencing reads which are
much shorter than the alignment, and where there is usually a consensus or
reference sequence with special status.
"""
def __init__(
self, records, alphabet=None, annotations=None, column_annotations=None
):
"""Initialize a new MultipleSeqAlignment object.
Arguments:
- records - A list (or iterator) of SeqRecord objects, whose
           sequences are all the same length. This may be an
empty list.
- alphabet - The alphabet for the whole alignment, typically a gapped
alphabet, which should be a super-set of the individual
record alphabets. If omitted, a consensus alphabet is
used.
- annotations - Information about the whole alignment (dictionary).
- column_annotations - Per column annotation (restricted dictionary).
This holds Python sequences (lists, strings, tuples)
whose length matches the number of columns. A typical
use would be a secondary structure consensus string.
You would normally load a MSA from a file using Bio.AlignIO, but you
can do this from a list of SeqRecord objects too:
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> a = SeqRecord(Seq("AAAACGT", generic_dna), id="Alpha")
>>> b = SeqRecord(Seq("AAA-CGT", generic_dna), id="Beta")
>>> c = SeqRecord(Seq("AAAAGGT", generic_dna), id="Gamma")
>>> align = MultipleSeqAlignment([a, b, c],
... annotations={"tool": "demo"},
... column_annotations={"stats": "CCCXCCC"})
>>> print(align)
DNAAlphabet() alignment with 3 rows and 7 columns
AAAACGT Alpha
AAA-CGT Beta
AAAAGGT Gamma
>>> align.annotations
{'tool': 'demo'}
>>> align.column_annotations
{'stats': 'CCCXCCC'}
"""
if alphabet is not None:
if not isinstance(alphabet, (Alphabet.Alphabet, Alphabet.AlphabetEncoder)):
raise ValueError("Invalid alphabet argument")
self._alphabet = alphabet
else:
# Default while we add sequences, will take a consensus later
self._alphabet = Alphabet.single_letter_alphabet
self._records = []
if records:
self.extend(records)
if alphabet is None:
# No alphabet was given, take a consensus alphabet
self._alphabet = Alphabet._consensus_alphabet(
rec.seq.alphabet for rec in self._records if rec.seq is not None
)
# Annotations about the whole alignment
if annotations is None:
annotations = {}
elif not isinstance(annotations, dict):
raise TypeError("annotations argument should be a dict")
self.annotations = annotations
        # Annotations about each column of the alignment
if column_annotations is None:
column_annotations = {}
# Handle this via the property set function which will validate it
self.column_annotations = column_annotations
def _set_per_column_annotations(self, value):
if not isinstance(value, dict):
raise TypeError(
"The per-column-annotations should be a (restricted) dictionary."
)
# Turn this into a restricted-dictionary (and check the entries)
if len(self):
# Use the standard method to get the length
expected_length = self.get_alignment_length()
self._per_col_annotations = _RestrictedDict(length=expected_length)
self._per_col_annotations.update(value)
else:
# Bit of a problem case... number of columns is undefined
self._per_col_annotations = None
if value:
raise ValueError(
"Can't set per-column-annotations without an alignment"
)
def _get_per_column_annotations(self):
if self._per_col_annotations is None:
# This happens if empty at initialisation
if len(self):
# Use the standard method to get the length
expected_length = self.get_alignment_length()
else:
# Should this raise an exception? Compare SeqRecord behaviour...
expected_length = 0
self._per_col_annotations = _RestrictedDict(length=expected_length)
return self._per_col_annotations
column_annotations = property(
fget=_get_per_column_annotations,
fset=_set_per_column_annotations,
doc="""Dictionary of per-letter-annotation for the sequence.""",
)
def _str_line(self, record, length=50):
"""Return a truncated string representation of a SeqRecord (PRIVATE).
This is a PRIVATE function used by the __str__ method.
"""
if record.seq.__class__.__name__ == "CodonSeq":
if len(record.seq) <= length:
return "%s %s" % (record.seq, record.id)
else:
return "%s...%s %s" % (
record.seq[: length - 3],
record.seq[-3:],
record.id,
)
else:
if len(record.seq) <= length:
return "%s %s" % (record.seq, record.id)
else:
return "%s...%s %s" % (
record.seq[: length - 6],
record.seq[-3:],
record.id,
)
def __str__(self):
"""Return a multi-line string summary of the alignment.
This output is intended to be readable, but large alignments are
shown truncated. A maximum of 20 rows (sequences) and 50 columns
are shown, with the record identifiers. This should fit nicely on a
single screen. e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align import MultipleSeqAlignment
>>> align = MultipleSeqAlignment([], Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(align)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGCTAG Alpha
ACT-CTAGCTAG Beta
ACTGCTAGATAG Gamma
See also the alignment's format method.
"""
rows = len(self._records)
lines = [
"%s alignment with %i rows and %i columns"
% (str(self._alphabet), rows, self.get_alignment_length())
]
if rows <= 20:
lines.extend(self._str_line(rec) for rec in self._records)
else:
lines.extend(self._str_line(rec) for rec in self._records[:18])
lines.append("...")
lines.append(self._str_line(self._records[-1]))
return "\n".join(lines)
def __repr__(self):
"""Return a representation of the object for debugging.
The representation cannot be used with eval() to recreate the object,
        which is usually possible with simple python objects. For example:
<Bio.Align.MultipleSeqAlignment instance (2 records of length 14,
SingleLetterAlphabet()) at a3c184c>
The hex string is the memory address of the object, see help(id).
This provides a simple way to visually distinguish alignments of
the same size.
"""
# A doctest for __repr__ would be nice, but __class__ comes out differently
# if run via the __main__ trick.
return "<%s instance (%i records of length %i, %s) at %x>" % (
self.__class__,
len(self._records),
self.get_alignment_length(),
repr(self._alphabet),
id(self),
)
# This version is useful for doing eval(repr(alignment)),
# but it can be VERY long:
# return "%s(%s, %s)" \
# % (self.__class__, repr(self._records), repr(self._alphabet))
def format(self, format_spec):
"""Return the alignment as a string in the specified file format [DEPRECATED].
This method is deprecated; instead of alignment.format(format_spec),
please use format(alignment, format_spec) or an f-string.
"""
warnings.warn(
"alignment.format has been deprecated, and we intend to remove it in a "
"future release of Biopython. Instead of alignment.format(format_spec), "
"please use format(alignment, format_spec) or an f-string.",
BiopythonDeprecationWarning,
)
return self.__format__(format_spec)
def __format__(self, format_spec):
"""Return the alignment as a string in the specified file format.
The format should be a lower case string supported as an output
format by Bio.AlignIO (such as "fasta", "clustal", "phylip",
"stockholm", etc), which is used to turn the alignment into a
string.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align import MultipleSeqAlignment
>>> align = MultipleSeqAlignment([], Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(format(align, "fasta"))
>Alpha
ACTGCTAGCTAG
>Beta
ACT-CTAGCTAG
>Gamma
ACTGCTAGATAG
<BLANKLINE>
>>> print(format(align, "phylip"))
3 12
Alpha ACTGCTAGCT AG
Beta ACT-CTAGCT AG
Gamma ACTGCTAGAT AG
<BLANKLINE>
"""
if format_spec:
from io import StringIO
from Bio import AlignIO
handle = StringIO()
AlignIO.write([self], handle, format_spec)
return handle.getvalue()
else:
# Follow python convention and default to using __str__
return str(self)
def __iter__(self):
"""Iterate over alignment rows as SeqRecord objects.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align import MultipleSeqAlignment
>>> align = MultipleSeqAlignment([], Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> for record in align:
... print(record.id)
... print(record.seq)
...
Alpha
ACTGCTAGCTAG
Beta
ACT-CTAGCTAG
Gamma
ACTGCTAGATAG
"""
return iter(self._records)
def __len__(self):
"""Return the number of sequences in the alignment.
Use len(alignment) to get the number of sequences (i.e. the number of
rows), and alignment.get_alignment_length() to get the length of the
longest sequence (i.e. the number of columns).
This is easy to remember if you think of the alignment as being like a
list of SeqRecord objects.
"""
return len(self._records)
def get_alignment_length(self):
"""Return the maximum length of the alignment.
All objects in the alignment should (hopefully) have the same
length. This function will go through and find this length
by finding the maximum length of sequences in the alignment.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align import MultipleSeqAlignment
>>> align = MultipleSeqAlignment([], Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> align.get_alignment_length()
12
If you want to know the number of sequences in the alignment,
use len(align) instead:
>>> len(align)
3
"""
max_length = 0
for record in self._records:
if len(record.seq) > max_length:
max_length = len(record.seq)
return max_length
def add_sequence(self, descriptor, sequence, start=None, end=None, weight=1.0):
"""Add a sequence to the alignment.
This doesn't do any kind of alignment, it just adds in the sequence
object, which is assumed to be prealigned with the existing
sequences.
Arguments:
- descriptor - The descriptive id of the sequence being added.
This will be used as the resulting SeqRecord's
.id property (and, for historical compatibility,
also the .description property)
- sequence - A string with sequence info.
- start - You can explicitly set the start point of the sequence.
This is useful (at least) for BLAST alignments, which can
just be partial alignments of sequences.
- end - Specify the end of the sequence, which is important
for the same reason as the start.
- weight - The weight to place on the sequence in the alignment.
By default, all sequences have the same weight. (0.0 =>
no weight, 1.0 => highest weight)
In general providing a SeqRecord and calling .append is preferred.
"""
new_seq = Seq(sequence, self._alphabet)
# We are now effectively using the SeqRecord's .id as
# the primary identifier (e.g. in Bio.SeqIO) so we should
# populate it with the descriptor.
# For backwards compatibility, also store this in the
# SeqRecord's description property.
new_record = SeqRecord(new_seq, id=descriptor, description=descriptor)
# hack! We really need to work out how to deal with annotations
# and features in biopython. Right now, I'll just use the
# generic annotations dictionary we've got to store the start
# and end, but we should think up something better. I don't know
# if I'm really a big fan of the LocatableSeq thing they've got
# in BioPerl, but I'm not positive what the best thing to do on
# this is...
if start:
new_record.annotations["start"] = start
if end:
new_record.annotations["end"] = end
# another hack to add weight information to the sequence
new_record.annotations["weight"] = weight
self._records.append(new_record)
def extend(self, records):
"""Add more SeqRecord objects to the alignment as rows.
They must all have the same length as the original alignment, and have
alphabets compatible with the alignment's alphabet. For example,
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> a = SeqRecord(Seq("AAAACGT", generic_dna), id="Alpha")
>>> b = SeqRecord(Seq("AAA-CGT", generic_dna), id="Beta")
>>> c = SeqRecord(Seq("AAAAGGT", generic_dna), id="Gamma")
>>> d = SeqRecord(Seq("AAAACGT", generic_dna), id="Delta")
>>> e = SeqRecord(Seq("AAA-GGT", generic_dna), id="Epsilon")
First we create a small alignment (three rows):
>>> align = MultipleSeqAlignment([a, b, c])
>>> print(align)
DNAAlphabet() alignment with 3 rows and 7 columns
AAAACGT Alpha
AAA-CGT Beta
AAAAGGT Gamma
Now we can extend this alignment with another two rows:
>>> align.extend([d, e])
>>> print(align)
DNAAlphabet() alignment with 5 rows and 7 columns
AAAACGT Alpha
AAA-CGT Beta
AAAAGGT Gamma
AAAACGT Delta
AAA-GGT Epsilon
Because the alignment object allows iteration over the rows as
SeqRecords, you can use the extend method with a second alignment
(provided its sequences have the same length as the original alignment).
"""
if len(self):
# Use the standard method to get the length
expected_length = self.get_alignment_length()
else:
# Take the first record's length
records = iter(records) # records arg could be list or iterator
try:
rec = next(records)
except StopIteration:
# Special case, no records
return
expected_length = len(rec)
self._append(rec, expected_length)
# Can now setup the per-column-annotations as well, set to None
# while missing the length:
self.column_annotations = {}
# Now continue to the rest of the records as usual
for rec in records:
self._append(rec, expected_length)
def append(self, record):
"""Add one more SeqRecord object to the alignment as a new row.
This must have the same length as the original alignment (unless this is
the first record), and have an alphabet compatible with the alignment's
alphabet.
>>> from Bio import AlignIO
>>> align = AlignIO.read("Clustalw/opuntia.aln", "clustal")
>>> print(align)
SingleLetterAlphabet() alignment with 7 rows and 156 columns
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273285|gb|AF191659.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273284|gb|AF191658.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273287|gb|AF191661.1|AF191
TATACATAAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273286|gb|AF191660.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273290|gb|AF191664.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273289|gb|AF191663.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273291|gb|AF191665.1|AF191
>>> len(align)
7
We'll now construct a dummy record to append as an example:
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> dummy = SeqRecord(Seq("N"*156), id="dummy")
Now append this to the alignment,
>>> align.append(dummy)
>>> print(align)
SingleLetterAlphabet() alignment with 8 rows and 156 columns
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273285|gb|AF191659.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273284|gb|AF191658.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273287|gb|AF191661.1|AF191
TATACATAAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273286|gb|AF191660.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273290|gb|AF191664.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273289|gb|AF191663.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273291|gb|AF191665.1|AF191
NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN...NNN dummy
>>> len(align)
8
"""
if self._records:
self._append(record, self.get_alignment_length())
else:
self._append(record)
def _append(self, record, expected_length=None):
"""Validate and append a record (PRIVATE)."""
if not isinstance(record, SeqRecord):
raise TypeError("New sequence is not a SeqRecord object")
# Currently the get_alignment_length() call is expensive, so we need
# to avoid calling it repeatedly for __init__ and extend, hence this
# private _append method
if expected_length is not None and len(record) != expected_length:
# TODO - Use the following more helpful error, but update unit tests
# raise ValueError("New sequence is not of length %i"
# % self.get_alignment_length())
raise ValueError("Sequences must all be the same length")
# Using not self.alphabet.contains(record.seq.alphabet) needs fixing
# for AlphabetEncoders (e.g. gapped versus ungapped).
if not Alphabet._check_type_compatible([self._alphabet, record.seq.alphabet]):
raise ValueError("New sequence's alphabet is incompatible")
self._records.append(record)
def __add__(self, other):
"""Combine two alignments with the same number of rows by adding them.
If you have two multiple sequence alignments (MSAs), there are two ways to think
about adding them - by row or by column. Using the extend method adds by row.
Using the addition operator adds by column. For example,
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> a1 = SeqRecord(Seq("AAAAC", generic_dna), id="Alpha")
>>> b1 = SeqRecord(Seq("AAA-C", generic_dna), id="Beta")
>>> c1 = SeqRecord(Seq("AAAAG", generic_dna), id="Gamma")
>>> a2 = SeqRecord(Seq("GT", generic_dna), id="Alpha")
>>> b2 = SeqRecord(Seq("GT", generic_dna), id="Beta")
>>> c2 = SeqRecord(Seq("GT", generic_dna), id="Gamma")
>>> left = MultipleSeqAlignment([a1, b1, c1],
... annotations={"tool": "demo", "name": "start"},
... column_annotations={"stats": "CCCXC"})
>>> right = MultipleSeqAlignment([a2, b2, c2],
... annotations={"tool": "demo", "name": "end"},
... column_annotations={"stats": "CC"})
Now, let's look at these two alignments:
>>> print(left)
DNAAlphabet() alignment with 3 rows and 5 columns
AAAAC Alpha
AAA-C Beta
AAAAG Gamma
>>> print(right)
DNAAlphabet() alignment with 3 rows and 2 columns
GT Alpha
GT Beta
GT Gamma
And add them:
>>> combined = left + right
>>> print(combined)
DNAAlphabet() alignment with 3 rows and 7 columns
AAAACGT Alpha
AAA-CGT Beta
AAAAGGT Gamma
For this to work, both alignments must have the same number of records (here
they both have 3 rows):
>>> len(left)
3
>>> len(right)
3
>>> len(combined)
3
The individual rows are SeqRecord objects, and these can be added together. Refer
to the SeqRecord documentation for details of how the annotation is handled. This
example is a special case in that both original alignments shared the same names,
meaning when the rows are added they also get the same name.
Any common annotations are preserved, but differing annotation is lost. This is
the same behaviour used in the SeqRecord annotations and is designed to prevent
accidental propagation of inappropriate values:
>>> combined.annotations
{'tool': 'demo'}
Similarly any common per-column-annotations are combined:
>>> combined.column_annotations
{'stats': 'CCCXCCC'}
"""
if not isinstance(other, MultipleSeqAlignment):
raise NotImplementedError
if len(self) != len(other):
raise ValueError(
"When adding two alignments they must have the same length"
" (i.e. same number or rows)"
)
alpha = Alphabet._consensus_alphabet([self._alphabet, other._alphabet])
merged = (left + right for left, right in zip(self, other))
# Take any common annotation:
annotations = {}
for k, v in self.annotations.items():
if k in other.annotations and other.annotations[k] == v:
annotations[k] = v
column_annotations = {}
for k, v in self.column_annotations.items():
if k in other.column_annotations:
column_annotations[k] = v + other.column_annotations[k]
return MultipleSeqAlignment(merged, alpha, annotations, column_annotations)
def __getitem__(self, index):
"""Access part of the alignment.
Depending on the indices, you can get a SeqRecord object
        (representing a single row), a Seq object (for a single column),
        a string (for a single character) or another alignment
(representing some part or all of the alignment).
align[r,c] gives a single character as a string
align[r] gives a row as a SeqRecord
align[r,:] gives a row as a SeqRecord
align[:,c] gives a column as a Seq (using the alignment's alphabet)
align[:] and align[:,:] give a copy of the alignment
Anything else gives a sub alignment, e.g.
align[0:2] or align[0:2,:] uses only row 0 and 1
align[:,1:3] uses only columns 1 and 2
align[0:2,1:3] uses only rows 0 & 1 and only cols 1 & 2
We'll use the following example alignment here for illustration:
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> a = SeqRecord(Seq("AAAACGT", generic_dna), id="Alpha")
>>> b = SeqRecord(Seq("AAA-CGT", generic_dna), id="Beta")
>>> c = SeqRecord(Seq("AAAAGGT", generic_dna), id="Gamma")
>>> d = SeqRecord(Seq("AAAACGT", generic_dna), id="Delta")
>>> e = SeqRecord(Seq("AAA-GGT", generic_dna), id="Epsilon")
>>> align = MultipleSeqAlignment([a, b, c, d, e], generic_dna)
You can access a row of the alignment as a SeqRecord using an integer
index (think of the alignment as a list of SeqRecord objects here):
>>> first_record = align[0]
>>> print("%s %s" % (first_record.id, first_record.seq))
Alpha AAAACGT
>>> last_record = align[-1]
>>> print("%s %s" % (last_record.id, last_record.seq))
Epsilon AAA-GGT
        You can also use python's slice notation to create a sub-alignment
containing only some of the SeqRecord objects:
>>> sub_alignment = align[2:5]
>>> print(sub_alignment)
DNAAlphabet() alignment with 3 rows and 7 columns
AAAAGGT Gamma
AAAACGT Delta
AAA-GGT Epsilon
This includes support for a step, i.e. align[start:end:step], which
can be used to select every second sequence:
>>> sub_alignment = align[::2]
>>> print(sub_alignment)
DNAAlphabet() alignment with 3 rows and 7 columns
AAAACGT Alpha
AAAAGGT Gamma
AAA-GGT Epsilon
Or to get a copy of the alignment with the rows in reverse order:
>>> rev_alignment = align[::-1]
>>> print(rev_alignment)
DNAAlphabet() alignment with 5 rows and 7 columns
AAA-GGT Epsilon
AAAACGT Delta
AAAAGGT Gamma
AAA-CGT Beta
AAAACGT Alpha
You can also use two indices to specify both rows and columns. Using simple
integers gives you the entry as a single character string. e.g.
>>> align[3, 4]
'C'
This is equivalent to:
>>> align[3][4]
'C'
or:
>>> align[3].seq[4]
'C'
To get a single column (as a string) use this syntax:
>>> align[:, 4]
'CCGCG'
Or, to get part of a column,
>>> align[1:3, 4]
'CG'
However, in general you get a sub-alignment,
>>> print(align[1:5, 3:6])
DNAAlphabet() alignment with 4 rows and 3 columns
-CG Beta
AGG Gamma
ACG Delta
-GG Epsilon
This should all seem familiar to anyone who has used the NumPy
array or matrix objects.
"""
if isinstance(index, int):
# e.g. result = align[x]
# Return a SeqRecord
return self._records[index]
elif isinstance(index, slice):
# e.g. sub_align = align[i:j:k]
new = MultipleSeqAlignment(self._records[index], self._alphabet)
if self.column_annotations and len(new) == len(self):
# All rows kept (although could have been reversed)
                # Preserve the column annotations too,
for k, v in self.column_annotations.items():
new.column_annotations[k] = v
return new
elif len(index) != 2:
raise TypeError("Invalid index type.")
# Handle double indexing
row_index, col_index = index
if isinstance(row_index, int):
# e.g. row_or_part_row = align[6, 1:4], gives a SeqRecord
return self._records[row_index][col_index]
elif isinstance(col_index, int):
# e.g. col_or_part_col = align[1:5, 6], gives a string
return "".join(rec[col_index] for rec in self._records[row_index])
else:
# e.g. sub_align = align[1:4, 5:7], gives another alignment
new = MultipleSeqAlignment(
(rec[col_index] for rec in self._records[row_index]), self._alphabet
)
if self.column_annotations and len(new) == len(self):
# All rows kept (although could have been reversed)
                # Preserve the column annotations too,
for k, v in self.column_annotations.items():
new.column_annotations[k] = v[col_index]
return new
def sort(self, key=None, reverse=False):
"""Sort the rows (SeqRecord objects) of the alignment in place.
This sorts the rows alphabetically using the SeqRecord object id by
default. The sorting can be controlled by supplying a key function
which must map each SeqRecord to a sort value.
This is useful if you want to add two alignments which use the same
record identifiers, but in a different order. For example,
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> align1 = MultipleSeqAlignment([
... SeqRecord(Seq("ACGT", generic_dna), id="Human"),
... SeqRecord(Seq("ACGG", generic_dna), id="Mouse"),
... SeqRecord(Seq("ACGC", generic_dna), id="Chicken"),
... ])
>>> align2 = MultipleSeqAlignment([
... SeqRecord(Seq("CGGT", generic_dna), id="Mouse"),
... SeqRecord(Seq("CGTT", generic_dna), id="Human"),
... SeqRecord(Seq("CGCT", generic_dna), id="Chicken"),
... ])
        If you simply try to add these without sorting, you get this:
>>> print(align1 + align2)
DNAAlphabet() alignment with 3 rows and 8 columns
ACGTCGGT <unknown id>
ACGGCGTT <unknown id>
ACGCCGCT Chicken
Consult the SeqRecord documentation which explains why you get a
default value when annotation like the identifier doesn't match up.
However, if we sort the alignments first, then add them we get the
desired result:
>>> align1.sort()
>>> align2.sort()
>>> print(align1 + align2)
DNAAlphabet() alignment with 3 rows and 8 columns
ACGCCGCT Chicken
ACGTCGTT Human
ACGGCGGT Mouse
As an example using a different sort order, you could sort on the
GC content of each sequence.
>>> from Bio.SeqUtils import GC
>>> print(align1)
DNAAlphabet() alignment with 3 rows and 4 columns
ACGC Chicken
ACGT Human
ACGG Mouse
>>> align1.sort(key = lambda record: GC(record.seq))
>>> print(align1)
DNAAlphabet() alignment with 3 rows and 4 columns
ACGT Human
ACGC Chicken
ACGG Mouse
There is also a reverse argument, so if you wanted to sort by ID
but backwards:
>>> align1.sort(reverse=True)
>>> print(align1)
DNAAlphabet() alignment with 3 rows and 4 columns
ACGG Mouse
ACGT Human
ACGC Chicken
"""
if key is None:
self._records.sort(key=lambda r: r.id, reverse=reverse)
else:
self._records.sort(key=key, reverse=reverse)
class PairwiseAlignment:
"""Represents a pairwise sequence alignment.
Internally, the pairwise alignment is stored as the path through
the traceback matrix, i.e. a tuple of pairs of indices corresponding
to the vertices of the path in the traceback matrix.
"""
def __init__(self, target, query, path, score):
"""Initialize a new PairwiseAlignment object.
Arguments:
- target - The first sequence, as a plain string, without gaps.
- query - The second sequence, as a plain string, without gaps.
- path - The path through the traceback matrix, defining an
alignment.
- score - The alignment score.
You would normally obtain a PairwiseAlignment object by iterating
over a PairwiseAlignments object.
"""
self.target = target
self.query = query
self.score = score
self.path = path
def __eq__(self, other):
return self.path == other.path
def __ne__(self, other):
return self.path != other.path
def __lt__(self, other):
return self.path < other.path
def __le__(self, other):
return self.path <= other.path
def __gt__(self, other):
return self.path > other.path
def __ge__(self, other):
return self.path >= other.path
def _convert_sequence_string(self, sequence):
if isinstance(sequence, str):
return sequence
if isinstance(sequence, Seq):
return str(sequence)
try: # check if target is a SeqRecord
sequence = sequence.seq
except AttributeError:
pass
else:
return str(sequence)
try:
view = memoryview(sequence)
except TypeError:
pass
else:
if view.format == "c":
return str(sequence)
return None
def __format__(self, format_spec):
"""Create a human-readable representation of the alignment."""
if format_spec == "psl":
return self._format_psl()
seq1 = self._convert_sequence_string(self.target)
if seq1 is None:
return self._format_generalized()
seq2 = self._convert_sequence_string(self.query)
if seq2 is None:
return self._format_generalized()
n1 = len(seq1)
n2 = len(seq2)
aligned_seq1 = ""
aligned_seq2 = ""
pattern = ""
path = self.path
end1, end2 = path[0]
if end1 > 0 or end2 > 0:
end = max(end1, end2)
aligned_seq1 += " " * (end - end1) + seq1[:end1]
aligned_seq2 += " " * (end - end2) + seq2[:end2]
pattern += " " * end
start1 = end1
start2 = end2
for end1, end2 in path[1:]:
gap = 0
if end1 == start1:
gap = end2 - start2
aligned_seq1 += "-" * gap
aligned_seq2 += seq2[start2:end2]
pattern += "-" * gap
elif end2 == start2:
gap = end1 - start1
aligned_seq1 += seq1[start1:end1]
aligned_seq2 += "-" * gap
pattern += "-" * gap
else:
s1 = seq1[start1:end1]
s2 = seq2[start2:end2]
aligned_seq1 += s1
aligned_seq2 += s2
for c1, c2 in zip(s1, s2):
if c1 == c2:
pattern += "|"
else:
pattern += "."
start1 = end1
start2 = end2
n1 -= end1
n2 -= end2
n = max(n1, n2)
aligned_seq1 += seq1[end1:] + " " * (n - n1)
aligned_seq2 += seq2[end2:] + " " * (n - n2)
pattern += " " * n
return "%s\n%s\n%s\n" % (aligned_seq1, pattern, aligned_seq2)
def _format_generalized(self):
seq1 = self.target
seq2 = self.query
n1 = len(seq1)
n2 = len(seq2)
aligned_seq1 = []
aligned_seq2 = []
pattern = []
path = self.path
end1, end2 = path[0]
if end1 > 0 or end2 > 0:
if end1 <= end2:
for c2 in seq2[: end2 - end1]:
s2 = str(c2)
s1 = " " * len(s2)
aligned_seq1.append(s1)
aligned_seq2.append(s2)
pattern.append(s1)
else: # end1 > end2
for c1 in seq1[: end1 - end2]:
s1 = str(c1)
s2 = " " * len(s1)
aligned_seq1.append(s1)
aligned_seq2.append(s2)
pattern.append(s2)
start1 = end1
start2 = end2
for end1, end2 in path[1:]:
if end1 == start1:
for c2 in seq2[start2:end2]:
s2 = str(c2)
s1 = "-" * len(s2)
aligned_seq1.append(s1)
aligned_seq2.append(s2)
pattern.append(s1)
start2 = end2
elif end2 == start2:
for c1 in seq1[start1:end1]:
s1 = str(c1)
s2 = "-" * len(s1)
aligned_seq1.append(s1)
aligned_seq2.append(s2)
pattern.append(s2)
start1 = end1
else:
for c1, c2 in zip(seq1[start1:end1], seq2[start2:end2]):
s1 = str(c1)
s2 = str(c2)
m1 = len(s1)
m2 = len(s2)
if c1 == c2:
p = "|"
else:
p = "."
if m1 < m2:
space = (m2 - m1) * " "
s1 += space
pattern.append(p * m1 + space)
elif m1 > m2:
space = (m1 - m2) * " "
s2 += space
pattern.append(p * m2 + space)
else:
pattern.append(p * m1)
aligned_seq1.append(s1)
aligned_seq2.append(s2)
start1 = end1
start2 = end2
aligned_seq1 = " ".join(aligned_seq1)
aligned_seq2 = " ".join(aligned_seq2)
pattern = " ".join(pattern)
return "%s\n%s\n%s\n" % (aligned_seq1, pattern, aligned_seq2)
def _format_psl(self):
query = self.query
target = self.target
try:
Qname = query.id
except AttributeError:
Qname = "query"
else:
query = query.seq
try:
Tname = target.id
except AttributeError:
Tname = "target"
else:
target = target.seq
seq1 = str(target)
seq2 = str(query)
n1 = len(seq1)
n2 = len(seq2)
match = 0
mismatch = 0
repmatch = 0
Ns = 0
Qgapcount = 0
Qgapbases = 0
Tgapcount = 0
Tgapbases = 0
Qsize = n2
Qstart = 0
Qend = Qsize
Tsize = n1
Tstart = 0
Tend = Tsize
blockSizes = []
qStarts = []
tStarts = []
strand = "+"
start1 = 0
start2 = 0
start1, start2 = self.path[0]
for end1, end2 in self.path[1:]:
count1 = end1 - start1
count2 = end2 - start2
if count1 == 0:
if start2 == 0:
Qstart += count2
elif end2 == n2:
Qend -= count2
else:
Qgapcount += 1
Qgapbases += count2
start2 = end2
elif count2 == 0:
if start1 == 0:
Tstart += count1
elif end1 == n1:
Tend -= count1
else:
Tgapcount += 1
Tgapbases += count1
start1 = end1
else:
assert count1 == count2
tStarts.append(start1)
qStarts.append(start2)
blockSizes.append(count1)
for c1, c2 in zip(seq1[start1:end1], seq2[start2:end2]):
if c1 == "N" or c2 == "N":
Ns += 1
elif c1 == c2:
match += 1
else:
mismatch += 1
start1 = end1
start2 = end2
blockcount = len(blockSizes)
blockSizes = ",".join(map(str, blockSizes)) + ","
qStarts = ",".join(map(str, qStarts)) + ","
tStarts = ",".join(map(str, tStarts)) + ","
words = [
str(match),
str(mismatch),
str(repmatch),
str(Ns),
str(Qgapcount),
str(Qgapbases),
str(Tgapcount),
str(Tgapbases),
strand,
Qname,
str(Qsize),
str(Qstart),
str(Qend),
Tname,
str(Tsize),
str(Tstart),
str(Tend),
str(blockcount),
blockSizes,
qStarts,
tStarts,
]
line = "\t".join(words) + "\n"
return line
def format(self):
"""Create a human-readable representation of the alignment (DEPRECATED).
This method is deprecated; instead of alignment.format(), please use
format(alignment) or an f-string.
"""
warnings.warn(
"alignment.format has been deprecated, and we intend to remove it in a "
"future release of Biopython. Instead of alignment.format(), please use "
"format(alignment) or an f-string.",
BiopythonDeprecationWarning,
)
return self.__format__(None)
def __str__(self):
return self.__format__(None)
@property
def aligned(self):
"""Return the indices of subsequences aligned to each other.
This property returns the start and end indices of subsequences
in the target and query sequence that were aligned to each other.
If the alignment between target (t) and query (q) consists of N
chunks, you get two tuples of length N:
(((t_start1, t_end1), (t_start2, t_end2), ..., (t_startN, t_endN)),
((q_start1, q_end1), (q_start2, q_end2), ..., (q_startN, q_endN)))
For example,
>>> from Bio import Align
>>> aligner = Align.PairwiseAligner()
>>> alignments = aligner.align("GAACT", "GAT")
>>> alignment = alignments[0]
>>> print(alignment)
GAACT
||--|
GA--T
<BLANKLINE>
>>> alignment.aligned
(((0, 2), (4, 5)), ((0, 2), (2, 3)))
>>> alignment = alignments[1]
>>> print(alignment)
GAACT
|-|-|
G-A-T
<BLANKLINE>
>>> alignment.aligned
(((0, 1), (2, 3), (4, 5)), ((0, 1), (1, 2), (2, 3)))
Note that different alignments may have the same subsequences
aligned to each other. In particular, this may occur if alignments
differ from each other in terms of their gap placement only:
>>> aligner.mismatch_score = -10
>>> alignments = aligner.align("AAACAAA", "AAAGAAA")
>>> len(alignments)
2
>>> print(alignments[0])
AAAC-AAA
|||--|||
AAA-GAAA
<BLANKLINE>
>>> alignments[0].aligned
(((0, 3), (4, 7)), ((0, 3), (4, 7)))
>>> print(alignments[1])
AAA-CAAA
|||--|||
AAAG-AAA
<BLANKLINE>
>>> alignments[1].aligned
(((0, 3), (4, 7)), ((0, 3), (4, 7)))
The property can be used to identify alignments that are identical
to each other in terms of their aligned sequences.
"""
segments1 = []
segments2 = []
i1, i2 = self.path[0]
for node in self.path[1:]:
j1, j2 = node
if j1 > i1 and j2 > i2:
segment1 = (i1, j1)
segment2 = (i2, j2)
segments1.append(segment1)
segments2.append(segment2)
i1, i2 = j1, j2
return tuple(segments1), tuple(segments2)
class PairwiseAlignments:
"""Implements an iterator over pairwise alignments returned by the aligner.
This class also supports indexing, which is fast for increasing indices,
but may be slow for random access of a large number of alignments.
Note that pairwise aligners can return an astronomical number of alignments,
even for relatively short sequences, if they align poorly to each other. We
    therefore recommend first checking the number of alignments, accessible as
len(alignments), which can be calculated quickly even if the number of
alignments is very large.
"""
def __init__(self, seqA, seqB, score, paths):
"""Initialize a new PairwiseAlignments object.
Arguments:
- seqA - The first sequence, as a plain string, without gaps.
- seqB - The second sequence, as a plain string, without gaps.
- score - The alignment score.
- paths - An iterator over the paths in the traceback matrix;
each path defines one alignment.
        You would normally obtain a PairwiseAlignments object by calling
aligner.align(seqA, seqB), where aligner is a PairwiseAligner object.
"""
self.seqA = seqA
self.seqB = seqB
self.score = score
self.paths = paths
self.index = -1
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
if index == self.index:
return self.alignment
if index < self.index:
self.paths.reset()
self.index = -1
while self.index < index:
try:
alignment = next(self)
except StopIteration:
raise IndexError("index out of range") from None
return alignment
def __iter__(self):
self.paths.reset()
self.index = -1
return self
def __next__(self):
path = next(self.paths)
self.index += 1
alignment = PairwiseAlignment(self.seqA, self.seqB, path, self.score)
self.alignment = alignment
return alignment
class PairwiseAligner(_aligners.PairwiseAligner):
"""Performs pairwise sequence alignment using dynamic programming.
This provides functions to get global and local alignments between two
sequences. A global alignment finds the best concordance between all
characters in two sequences. A local alignment finds just the
subsequences that align the best.
To perform a pairwise sequence alignment, first create a PairwiseAligner
object. This object stores the match and mismatch scores, as well as the
gap scores. Typically, match scores are positive, while mismatch scores
and gap scores are negative or zero. By default, the match score is 1,
and the mismatch and gap scores are zero. Based on the values of the gap
scores, a PairwiseAligner object automatically chooses the appropriate
alignment algorithm (the Needleman-Wunsch, Smith-Waterman, Gotoh, or
Waterman-Smith-Beyer global or local alignment algorithm).
Calling the "score" method on the aligner with two sequences as arguments
will calculate the alignment score between the two sequences.
Calling the "align" method on the aligner with two sequences as arguments
will return a generator yielding the alignments between the two
sequences.
Some examples:
>>> from Bio import Align
>>> aligner = Align.PairwiseAligner()
>>> alignments = aligner.align("TACCG", "ACG")
>>> for alignment in sorted(alignments):
... print("Score = %.1f:" % alignment.score)
... print(alignment)
...
Score = 3.0:
TACCG
-|-||
-A-CG
<BLANKLINE>
Score = 3.0:
TACCG
-||-|
-AC-G
<BLANKLINE>
Specify the aligner mode as local to generate local alignments:
>>> aligner.mode = 'local'
>>> alignments = aligner.align("TACCG", "ACG")
>>> for alignment in sorted(alignments):
... print("Score = %.1f:" % alignment.score)
... print(alignment)
...
Score = 3.0:
TACCG
|-||
A-CG
<BLANKLINE>
Score = 3.0:
TACCG
||-|
AC-G
<BLANKLINE>
Do a global alignment. Identical characters are given 2 points,
1 point is deducted for each non-identical character.
>>> aligner.mode = 'global'
>>> aligner.match_score = 2
>>> aligner.mismatch_score = -1
>>> for alignment in aligner.align("TACCG", "ACG"):
... print("Score = %.1f:" % alignment.score)
... print(alignment)
...
Score = 6.0:
TACCG
-||-|
-AC-G
<BLANKLINE>
Score = 6.0:
TACCG
-|-||
-A-CG
<BLANKLINE>
Same as above, except now 0.5 points are deducted when opening a
gap, and 0.1 points are deducted when extending it.
>>> aligner.open_gap_score = -0.5
>>> aligner.extend_gap_score = -0.1
>>> aligner.target_end_gap_score = 0.0
>>> aligner.query_end_gap_score = 0.0
>>> for alignment in aligner.align("TACCG", "ACG"):
... print("Score = %.1f:" % alignment.score)
... print(alignment)
...
Score = 5.5:
TACCG
-|-||
-A-CG
<BLANKLINE>
Score = 5.5:
TACCG
-||-|
-AC-G
<BLANKLINE>
The alignment function can also use known matrices already included in
Biopython:
>>> from Bio.Align import substitution_matrices
>>> aligner = Align.PairwiseAligner()
>>> aligner.substitution_matrix = substitution_matrices.load("BLOSUM62")
>>> alignments = aligner.align("KEVLA", "EVL")
>>> alignments = list(alignments)
>>> print("Number of alignments: %d" % len(alignments))
Number of alignments: 1
>>> alignment = alignments[0]
>>> print("Score = %.1f" % alignment.score)
Score = 13.0
>>> print(alignment)
KEVLA
-|||-
-EVL-
<BLANKLINE>
"""
def __setattr__(self, key, value):
if key not in dir(_aligners.PairwiseAligner):
# To prevent confusion, don't allow users to create new attributes
raise AttributeError("PairwiseAligner object has no attribute '%s'" % key)
_aligners.PairwiseAligner.__setattr__(self, key, value)
def align(self, seqA, seqB):
"""Return the alignments of two sequences using PairwiseAligner."""
if isinstance(seqA, Seq):
seqA = str(seqA)
if isinstance(seqB, Seq):
seqB = str(seqB)
score, paths = _aligners.PairwiseAligner.align(self, seqA, seqB)
alignments = PairwiseAlignments(seqA, seqB, score, paths)
return alignments
def score(self, seqA, seqB):
"""Return the alignments score of two sequences using PairwiseAligner."""
if isinstance(seqA, Seq):
seqA = str(seqA)
if isinstance(seqB, Seq):
seqB = str(seqB)
return _aligners.PairwiseAligner.score(self, seqA, seqB)
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
gyp | b40bc5b25eec0c62ae7308bc2284e2aa8b90de87 | # GRPC GYP build file
# This file has been automatically generated from a template file.
# Please look at the templates directory instead.
# This file can be regenerated from the template by running
# tools/buildgen/generate_projects.sh
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
# The openssl and zlib dependencies must be passed in as variables
# defined in an included gypi file, usually common.gypi.
'openssl_gyp_target%': 'Please Define openssl_gyp_target variable',
'zlib_gyp_target%': 'Please Define zlib_gyp_target variable',
'grpc_gcov%': 'false',
'grpc_alpine%': 'false',
},
'target_defaults': {
'configurations': {
'Debug': {
'cflags': [
'-O0',
],
'defines': [
'_DEBUG',
'DEBUG',
],
},
'Release': {
'cflags': [
'-O2',
'-Wframe-larger-than=16384',
],
'defines': [
'NDEBUG',
],
},
},
'cflags': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
],
'ldflags': [
'-g',
],
'cflags_c': [
'-Werror',
'-std=c99',
],
'cflags_cc': [
'-Werror',
'-std=c++11',
],
'include_dirs': [
'.',
'../..',
'include',
],
'defines': [
'GRPC_ARES=0',
],
'dependencies': [
'<(openssl_gyp_target)',
'<(zlib_gyp_target)',
],
'conditions': [
['grpc_gcov=="true"', {
'cflags': [
'-O0',
'-fprofile-arcs',
'-ftest-coverage',
'-Wno-return-type',
],
'defines': [
'_DEBUG',
'DEBUG',
'GPR_GCOV',
],
'ldflags': [
'-fprofile-arcs',
'-ftest-coverage',
'-rdynamic',
'-lstdc++',
],
}],
['grpc_alpine=="true"', {
'defines': [
'GPR_MUSL_LIBC_COMPAT'
]
}],
['OS == "win"', {
'defines': [
'_WIN32_WINNT=0x0600',
'WIN32_LEAN_AND_MEAN',
'_HAS_EXCEPTIONS=0',
'UNICODE',
'_UNICODE',
'NOMINMAX',
],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
}
},
"libraries": [
"ws2_32"
]
}],
['OS == "mac"', {
'xcode_settings': {
'OTHER_CFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations',
],
},
}]
]
},
'targets': [
{
'target_name': 'address_sorting',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
],
},
{
'target_name': 'end2end_nosec_tests',
'type': 'static_library',
'dependencies': [
'grpc_test_util',
'grpc',
'gpr',
'address_sorting',
'upb',
],
'sources': [
'test/core/end2end/cq_verifier.cc',
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/core/end2end/end2end_nosec_tests.cc',
'test/core/end2end/end2end_test_utils.cc',
'test/core/end2end/fixtures/http_proxy_fixture.cc',
'test/core/end2end/fixtures/local_util.cc',
'test/core/end2end/fixtures/proxy.cc',
'test/core/end2end/tests/authority_not_supported.cc',
'test/core/end2end/tests/bad_hostname.cc',
'test/core/end2end/tests/bad_ping.cc',
'test/core/end2end/tests/binary_metadata.cc',
'test/core/end2end/tests/call_host_override.cc',
'test/core/end2end/tests/cancel_after_accept.cc',
'test/core/end2end/tests/cancel_after_client_done.cc',
'test/core/end2end/tests/cancel_after_invoke.cc',
'test/core/end2end/tests/cancel_after_round_trip.cc',
'test/core/end2end/tests/cancel_before_invoke.cc',
'test/core/end2end/tests/cancel_in_a_vacuum.cc',
'test/core/end2end/tests/cancel_with_status.cc',
'test/core/end2end/tests/channelz.cc',
'test/core/end2end/tests/client_streaming.cc',
'test/core/end2end/tests/compressed_payload.cc',
'test/core/end2end/tests/connectivity.cc',
'test/core/end2end/tests/default_host.cc',
'test/core/end2end/tests/disappearing_server.cc',
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_init_fails.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
'test/core/end2end/tests/idempotent_request.cc',
'test/core/end2end/tests/invoke_large_request.cc',
'test/core/end2end/tests/keepalive_timeout.cc',
'test/core/end2end/tests/large_metadata.cc',
'test/core/end2end/tests/max_concurrent_streams.cc',
'test/core/end2end/tests/max_connection_age.cc',
'test/core/end2end/tests/max_connection_idle.cc',
'test/core/end2end/tests/max_message_length.cc',
'test/core/end2end/tests/negative_deadline.cc',
'test/core/end2end/tests/no_error_on_hotpath.cc',
'test/core/end2end/tests/no_logging.cc',
'test/core/end2end/tests/no_op.cc',
'test/core/end2end/tests/payload.cc',
'test/core/end2end/tests/ping.cc',
'test/core/end2end/tests/ping_pong_streaming.cc',
'test/core/end2end/tests/proxy_auth.cc',
'test/core/end2end/tests/registered_call.cc',
'test/core/end2end/tests/request_with_flags.cc',
'test/core/end2end/tests/request_with_payload.cc',
'test/core/end2end/tests/resource_quota_server.cc',
'test/core/end2end/tests/retry.cc',
'test/core/end2end/tests/retry_cancellation.cc',
'test/core/end2end/tests/retry_disabled.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc',
'test/core/end2end/tests/retry_non_retriable_status.cc',
'test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc',
'test/core/end2end/tests/retry_recv_initial_metadata.cc',
'test/core/end2end/tests/retry_recv_message.cc',
'test/core/end2end/tests/retry_server_pushback_delay.cc',
'test/core/end2end/tests/retry_server_pushback_disabled.cc',
'test/core/end2end/tests/retry_streaming.cc',
'test/core/end2end/tests/retry_streaming_after_commit.cc',
'test/core/end2end/tests/retry_streaming_succeeds_before_replay_finished.cc',
'test/core/end2end/tests/retry_throttled.cc',
'test/core/end2end/tests/retry_too_many_attempts.cc',
'test/core/end2end/tests/server_finishes_request.cc',
'test/core/end2end/tests/server_streaming.cc',
'test/core/end2end/tests/shutdown_finishes_calls.cc',
'test/core/end2end/tests/shutdown_finishes_tags.cc',
'test/core/end2end/tests/simple_cacheable_request.cc',
'test/core/end2end/tests/simple_delayed_request.cc',
'test/core/end2end/tests/simple_metadata.cc',
'test/core/end2end/tests/simple_request.cc',
'test/core/end2end/tests/stream_compression_compressed_payload.cc',
'test/core/end2end/tests/stream_compression_payload.cc',
'test/core/end2end/tests/stream_compression_ping_pong_streaming.cc',
'test/core/end2end/tests/streaming_error_response.cc',
'test/core/end2end/tests/trailing_metadata.cc',
'test/core/end2end/tests/workaround_cronet_compression.cc',
'test/core/end2end/tests/write_buffering.cc',
'test/core/end2end/tests/write_buffering_at_end.cc',
],
},
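    # Full end-to-end core test suite, including the credentials-based
    # call_creds test that the nosec variant omits.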
{
'target_name': 'end2end_tests',
'type': 'static_library',
'dependencies': [
'grpc_test_util',
'grpc',
'gpr',
'address_sorting',
'upb',
],
'sources': [
'test/core/end2end/cq_verifier.cc',
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/core/end2end/end2end_test_utils.cc',
'test/core/end2end/end2end_tests.cc',
'test/core/end2end/fixtures/http_proxy_fixture.cc',
'test/core/end2end/fixtures/local_util.cc',
'test/core/end2end/fixtures/proxy.cc',
'test/core/end2end/tests/authority_not_supported.cc',
'test/core/end2end/tests/bad_hostname.cc',
'test/core/end2end/tests/bad_ping.cc',
'test/core/end2end/tests/binary_metadata.cc',
'test/core/end2end/tests/call_creds.cc',
'test/core/end2end/tests/call_host_override.cc',
'test/core/end2end/tests/cancel_after_accept.cc',
'test/core/end2end/tests/cancel_after_client_done.cc',
'test/core/end2end/tests/cancel_after_invoke.cc',
'test/core/end2end/tests/cancel_after_round_trip.cc',
'test/core/end2end/tests/cancel_before_invoke.cc',
'test/core/end2end/tests/cancel_in_a_vacuum.cc',
'test/core/end2end/tests/cancel_with_status.cc',
'test/core/end2end/tests/channelz.cc',
'test/core/end2end/tests/client_streaming.cc',
'test/core/end2end/tests/compressed_payload.cc',
'test/core/end2end/tests/connectivity.cc',
'test/core/end2end/tests/default_host.cc',
'test/core/end2end/tests/disappearing_server.cc',
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_init_fails.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
'test/core/end2end/tests/idempotent_request.cc',
'test/core/end2end/tests/invoke_large_request.cc',
'test/core/end2end/tests/keepalive_timeout.cc',
'test/core/end2end/tests/large_metadata.cc',
'test/core/end2end/tests/max_concurrent_streams.cc',
'test/core/end2end/tests/max_connection_age.cc',
'test/core/end2end/tests/max_connection_idle.cc',
'test/core/end2end/tests/max_message_length.cc',
'test/core/end2end/tests/negative_deadline.cc',
'test/core/end2end/tests/no_error_on_hotpath.cc',
'test/core/end2end/tests/no_logging.cc',
'test/core/end2end/tests/no_op.cc',
'test/core/end2end/tests/payload.cc',
'test/core/end2end/tests/ping.cc',
'test/core/end2end/tests/ping_pong_streaming.cc',
'test/core/end2end/tests/proxy_auth.cc',
'test/core/end2end/tests/registered_call.cc',
'test/core/end2end/tests/request_with_flags.cc',
'test/core/end2end/tests/request_with_payload.cc',
'test/core/end2end/tests/resource_quota_server.cc',
'test/core/end2end/tests/retry.cc',
'test/core/end2end/tests/retry_cancellation.cc',
'test/core/end2end/tests/retry_disabled.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc',
'test/core/end2end/tests/retry_non_retriable_status.cc',
'test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc',
'test/core/end2end/tests/retry_recv_initial_metadata.cc',
'test/core/end2end/tests/retry_recv_message.cc',
'test/core/end2end/tests/retry_server_pushback_delay.cc',
'test/core/end2end/tests/retry_server_pushback_disabled.cc',
'test/core/end2end/tests/retry_streaming.cc',
'test/core/end2end/tests/retry_streaming_after_commit.cc',
'test/core/end2end/tests/retry_streaming_succeeds_before_replay_finished.cc',
'test/core/end2end/tests/retry_throttled.cc',
'test/core/end2end/tests/retry_too_many_attempts.cc',
'test/core/end2end/tests/server_finishes_request.cc',
'test/core/end2end/tests/server_streaming.cc',
'test/core/end2end/tests/shutdown_finishes_calls.cc',
'test/core/end2end/tests/shutdown_finishes_tags.cc',
'test/core/end2end/tests/simple_cacheable_request.cc',
'test/core/end2end/tests/simple_delayed_request.cc',
'test/core/end2end/tests/simple_metadata.cc',
'test/core/end2end/tests/simple_request.cc',
'test/core/end2end/tests/stream_compression_compressed_payload.cc',
'test/core/end2end/tests/stream_compression_payload.cc',
'test/core/end2end/tests/stream_compression_ping_pong_streaming.cc',
'test/core/end2end/tests/streaming_error_response.cc',
'test/core/end2end/tests/trailing_metadata.cc',
'test/core/end2end/tests/workaround_cronet_compression.cc',
'test/core/end2end/tests/write_buffering.cc',
'test/core/end2end/tests/write_buffering_at_end.cc',
],
},
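    # gpr: gRPC's portability runtime (allocation, logging, sync, time,
    # environment and thread helpers), layered on a small set of Abseil
    # libraries.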
{
'target_name': 'gpr',
'type': 'static_library',
'dependencies': [
'absl/types:optional',
'absl/time:time',
'absl/synchronization:synchronization',
'absl/strings:strings',
'absl/strings:str_format',
'absl/status:status',
'absl/memory:memory',
'absl/base:base',
],
'sources': [
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tls_pthread.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/arena.cc',
'src/core/lib/gprpp/examine_stack.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/stat_posix.cc',
'src/core/lib/gprpp/stat_windows.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/gprpp/time_util.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
],
},
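    # grpc: the secure core C library, including the chttp2 transport, the
    # client-channel/LB machinery, xDS support, TSI/ALTS security, and the
    # generated upb/upbdefs protobuf code.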
{
'target_name': 'grpc',
'type': 'static_library',
'dependencies': [
'gpr',
'address_sorting',
'upb',
'absl/types:optional',
'absl/strings:strings',
'absl/status:statusor',
'absl/status:status',
'absl/functional:bind_front',
'absl/container:inlined_vector',
'absl/container:flat_hash_map',
],
'sources': [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_libuv.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/service_config.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/service_config_parser.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_idle/client_idle_filter.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
'src/core/ext/filters/workarounds/workaround_utils.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/authority.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/incoming_metadata.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c',
'src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c',
'src/core/ext/upb-generated/envoy/service/cluster/v3/cds.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c',
'src/core/ext/upb-generated/envoy/service/endpoint/v3/eds.upb.c',
'src/core/ext/upb-generated/envoy/service/listener/v3/lds.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c',
'src/core/ext/upb-generated/envoy/service/route/v3/rds.upb.c',
'src/core/ext/upb-generated/envoy/service/route/v3/srds.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/range.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/security.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/annotations/status.upb.c',
'src/core/ext/upb-generated/udpa/annotations/versioning.upb.c',
'src/core/ext/upb-generated/udpa/data/orca/v1/orca_load_report.upb.c',
'src/core/ext/upb-generated/udpa/type/v1/typed_struct.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/core/v3/authority.upb.c',
'src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c',
'src/core/ext/upb-generated/xds/core/v3/context_params.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c',
'src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/cluster/v3/cds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/endpoint/v3/eds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/listener/v3/lds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/route/v3/rds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/route/v3/srds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/http.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c',
'src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/type/v1/typed_struct.upbdefs.c',
'src/core/ext/upbdefs-generated/validate/validate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c',
'src/core/ext/xds/certificate_provider_registry.cc',
'src/core/ext/xds/certificate_provider_store.cc',
'src/core/ext/xds/file_watcher_certificate_provider_factory.cc',
'src/core/ext/xds/xds_api.cc',
'src/core/ext/xds/xds_bootstrap.cc',
'src/core/ext/xds/xds_certificate_provider.cc',
'src/core/ext/xds/xds_client.cc',
'src/core/ext/xds/xds_client_stats.cc',
'src/core/ext/xds/xds_http_fault_filter.cc',
'src/core/ext/xds/xds_http_filters.cc',
'src/core/ext/xds/xds_server_config_fetcher.cc',
'src/core/lib/avl/avl.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/handshaker.cc',
'src/core/lib/channel/handshaker_registry.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_args.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/compression/stream_compression.cc',
'src/core/lib/compression/stream_compression_gzip.cc',
'src/core/lib/compression/stream_compression_identity.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_uv.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/parse_address.cc',
'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_custom.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_custom.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_uv.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_custom.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/resource_quota.cc',
'src/core/lib/iomgr/sockaddr_utils.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_uv.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_custom.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_custom.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_uv.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_custom.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/timer_uv.cc',
'src/core/lib/iomgr/udp_server.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/credentials_metadata.cc',
'src/core/lib/security/credentials/external/aws_external_account_credentials.cc',
'src/core/lib/security/credentials/external/aws_request_signer.cc',
'src/core/lib/security/credentials/external/external_account_credentials.cc',
'src/core/lib/security/credentials/external/file_external_account_credentials.cc',
'src/core/lib/security/credentials/external/url_external_account_credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/credentials/xds/xds_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_intern.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/init_secure.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/authority_override.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/metadata.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/static_metadata.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/status_metadata.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
],
},
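    # Native shim exposing the core C API to the C# binding.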
{
'target_name': 'grpc_csharp_ext',
'type': 'static_library',
'dependencies': [
'grpc',
'gpr',
'address_sorting',
'upb',
],
'sources': [
'src/csharp/ext/grpc_csharp_ext.c',
],
},
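    # Test helpers (port pickers, mock/passthru endpoints, subprocess and
    # tracing utilities) shared by core tests; linked against the secure core
    # library.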
{
'target_name': 'grpc_test_util',
'type': 'static_library',
'dependencies': [
'grpc',
'gpr',
'address_sorting',
'upb',
'absl/debugging:symbolize',
'absl/debugging:stacktrace',
'absl/debugging:failure_signal_handler',
],
'sources': [
'test/core/util/cmdline.cc',
'test/core/util/eval_args_mock_endpoint.cc',
'test/core/util/fuzzer_util.cc',
'test/core/util/grpc_profiler.cc',
'test/core/util/histogram.cc',
'test/core/util/memory_counters.cc',
'test/core/util/mock_endpoint.cc',
'test/core/util/parse_hexstring.cc',
'test/core/util/passthru_endpoint.cc',
'test/core/util/port.cc',
'test/core/util/port_isolated_runtime_environment.cc',
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/test_tcp_server.cc',
'test/core/util/tls_utils.cc',
'test/core/util/tracer_util.cc',
'test/core/util/trickle_endpoint.cc',
],
},
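    # The same test helpers built against grpc_unsecure; tls_utils.cc is the
    # one file dropped here since it relies on the TLS code.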
{
'target_name': 'grpc_test_util_unsecure',
'type': 'static_library',
'dependencies': [
'grpc_unsecure',
'gpr',
'address_sorting',
'upb',
'absl/debugging:symbolize',
'absl/debugging:stacktrace',
'absl/debugging:failure_signal_handler',
],
'sources': [
'test/core/util/cmdline.cc',
'test/core/util/eval_args_mock_endpoint.cc',
'test/core/util/fuzzer_util.cc',
'test/core/util/grpc_profiler.cc',
'test/core/util/histogram.cc',
'test/core/util/memory_counters.cc',
'test/core/util/mock_endpoint.cc',
'test/core/util/parse_hexstring.cc',
'test/core/util/passthru_endpoint.cc',
'test/core/util/port.cc',
'test/core/util/port_isolated_runtime_environment.cc',
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/test_tcp_server.cc',
'test/core/util/tracer_util.cc',
'test/core/util/trickle_endpoint.cc',
],
},
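    # Core library variant with the security stack (TSI, credentials, security
    # connectors) and the xDS/ALTS code compiled out.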
{
'target_name': 'grpc_unsecure',
'type': 'static_library',
'dependencies': [
'gpr',
'address_sorting',
'upb',
'absl/types:optional',
'absl/strings:strings',
'absl/status:statusor',
'absl/status:status',
'absl/container:inlined_vector',
'absl/container:flat_hash_map',
],
'sources': [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_libuv.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/service_config.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/service_config_parser.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_idle/client_idle_filter.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
'src/core/ext/filters/workarounds/workaround_utils.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/authority.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/incoming_metadata.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/udpa/data/orca/v1/orca_load_report.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/lib/avl/avl.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/handshaker.cc',
'src/core/lib/channel/handshaker_registry.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_args.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/compression/stream_compression.cc',
'src/core/lib/compression/stream_compression_gzip.cc',
'src/core/lib/compression/stream_compression_identity.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_uv.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/parse_address.cc',
'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_custom.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_custom.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_uv.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_custom.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/resource_quota.cc',
'src/core/lib/iomgr/sockaddr_utils.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_uv.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_custom.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_custom.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_uv.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_custom.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/timer_uv.cc',
'src/core/lib/iomgr/udp_server.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_intern.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/init_unsecure.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/authority_override.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/metadata.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/static_metadata.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/status_metadata.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_unsecure_plugin_registry.cc',
],
},
{
'target_name': 'benchmark_helpers',
'type': 'static_library',
'dependencies': [
'grpc_test_util_unsecure',
'grpc++_unsecure',
'grpc_unsecure',
'grpc++_test_config',
'gpr',
'address_sorting',
'upb',
'benchmark',
],
'sources': [
'src/proto/grpc/testing/echo.proto',
'src/proto/grpc/testing/echo_messages.proto',
'src/proto/grpc/testing/simple_messages.proto',
'test/cpp/microbenchmarks/helpers.cc',
],
},
{
'target_name': 'grpc++',
'type': 'static_library',
'dependencies': [
'grpc',
'gpr',
'address_sorting',
'upb',
'absl/synchronization:synchronization',
],
'sources': [
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/client/secure_credentials.cc',
'src/cpp/client/xds_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/auth_property_iterator.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/secure_auth_context.cc',
'src/cpp/common/secure_channel_arguments.cc',
'src/cpp/common/secure_create_auth_context.cc',
'src/cpp/common/tls_certificate_provider.cc',
'src/cpp/common/tls_credentials_options.cc',
'src/cpp/common/tls_credentials_options_util.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/secure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/server/xds_server_credentials.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
'target_name': 'grpc++_alts',
'type': 'static_library',
'dependencies': [
'grpc++',
'grpc',
'gpr',
'address_sorting',
'upb',
],
'sources': [
'src/cpp/common/alts_context.cc',
'src/cpp/common/alts_util.cc',
],
},
{
'target_name': 'grpc++_error_details',
'type': 'static_library',
'dependencies': [
'grpc++',
'grpc',
'gpr',
'address_sorting',
'upb',
],
'sources': [
'src/cpp/util/error_details.cc',
],
},
{
'target_name': 'grpc++_reflection',
'type': 'static_library',
'dependencies': [
'grpc++',
'grpc',
'gpr',
'address_sorting',
'upb',
],
'sources': [
'src/proto/grpc/reflection/v1alpha/reflection.proto',
'src/cpp/ext/proto_server_reflection.cc',
'src/cpp/ext/proto_server_reflection_plugin.cc',
],
},
{
'target_name': 'grpc++_test',
'type': 'static_library',
'dependencies': [
'grpc++',
'grpc',
'gpr',
'address_sorting',
'upb',
],
'sources': [
'src/cpp/client/channel_test_peer.cc',
],
},
{
'target_name': 'grpc++_test_config',
'type': 'static_library',
'dependencies': [
'gpr',
'absl/flags:parse',
],
'sources': [
'test/cpp/util/test_config_cc.cc',
],
},
{
'target_name': 'grpc++_test_util',
'type': 'static_library',
'dependencies': [
'grpc_test_util',
'grpc++',
'grpc',
'gpr',
'address_sorting',
'upb',
'absl/flags:flag',
],
'sources': [
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/cpp/util/byte_buffer_proto_helper.cc',
'test/cpp/util/create_test_channel.cc',
'test/cpp/util/string_ref_helper.cc',
'test/cpp/util/subprocess.cc',
'test/cpp/util/test_credentials_provider.cc',
],
},
{
'target_name': 'grpc++_unsecure',
'type': 'static_library',
'dependencies': [
'grpc_unsecure',
'gpr',
'address_sorting',
'upb',
'absl/synchronization:synchronization',
],
'sources': [
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/insecure_create_auth_context.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
'target_name': 'grpc_plugin_support',
'type': 'static_library',
'dependencies': [
],
'sources': [
'src/compiler/cpp_generator.cc',
'src/compiler/csharp_generator.cc',
'src/compiler/node_generator.cc',
'src/compiler/objective_c_generator.cc',
'src/compiler/php_generator.cc',
'src/compiler/python_generator.cc',
'src/compiler/ruby_generator.cc',
],
},
{
'target_name': 'grpcpp_channelz',
'type': 'static_library',
'dependencies': [
'grpc++',
'grpc',
'gpr',
'address_sorting',
'upb',
],
'sources': [
'src/proto/grpc/channelz/channelz.proto',
'src/cpp/server/channelz/channelz_service.cc',
'src/cpp/server/channelz/channelz_service_plugin.cc',
],
},
{
'target_name': 'boringssl',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/is_fips.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_r2x.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
],
},
{
'target_name': 'boringssl_test_util',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/src/crypto/test/file_test.cc',
'third_party/boringssl-with-bazel/src/crypto/test/malloc.cc',
'third_party/boringssl-with-bazel/src/crypto/test/test_util.cc',
'third_party/boringssl-with-bazel/src/crypto/test/wycheproof_util.cc',
],
},
{
'target_name': 'benchmark',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/benchmark/src/benchmark.cc',
'third_party/benchmark/src/benchmark_api_internal.cc',
'third_party/benchmark/src/benchmark_main.cc',
'third_party/benchmark/src/benchmark_name.cc',
'third_party/benchmark/src/benchmark_register.cc',
'third_party/benchmark/src/benchmark_runner.cc',
'third_party/benchmark/src/colorprint.cc',
'third_party/benchmark/src/commandlineflags.cc',
'third_party/benchmark/src/complexity.cc',
'third_party/benchmark/src/console_reporter.cc',
'third_party/benchmark/src/counter.cc',
'third_party/benchmark/src/csv_reporter.cc',
'third_party/benchmark/src/json_reporter.cc',
'third_party/benchmark/src/reporter.cc',
'third_party/benchmark/src/sleep.cc',
'third_party/benchmark/src/statistics.cc',
'third_party/benchmark/src/string_util.cc',
'third_party/benchmark/src/sysinfo.cc',
'third_party/benchmark/src/timers.cc',
],
},
{
'target_name': 're2',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/re2/re2/bitstate.cc',
'third_party/re2/re2/compile.cc',
'third_party/re2/re2/dfa.cc',
'third_party/re2/re2/filtered_re2.cc',
'third_party/re2/re2/mimics_pcre.cc',
'third_party/re2/re2/nfa.cc',
'third_party/re2/re2/onepass.cc',
'third_party/re2/re2/parse.cc',
'third_party/re2/re2/perl_groups.cc',
'third_party/re2/re2/prefilter.cc',
'third_party/re2/re2/prefilter_tree.cc',
'third_party/re2/re2/prog.cc',
'third_party/re2/re2/re2.cc',
'third_party/re2/re2/regexp.cc',
'third_party/re2/re2/set.cc',
'third_party/re2/re2/simplify.cc',
'third_party/re2/re2/stringpiece.cc',
'third_party/re2/re2/tostring.cc',
'third_party/re2/re2/unicode_casefold.cc',
'third_party/re2/re2/unicode_groups.cc',
'third_party/re2/util/pcre.cc',
'third_party/re2/util/rune.cc',
'third_party/re2/util/strutil.cc',
],
},
{
'target_name': 'upb',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/upb/upb/decode_fast.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/json_decode.c',
'third_party/upb/upb/json_encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/reflection.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/text_encode.c',
'third_party/upb/upb/upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
],
},
{
'target_name': 'z',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
],
},
]
}
|
py | b40bc5bee1e0d7eb869c2b0f97b5abdfe9ed6d27 | from setuptools import setup
dependencies = [
"multidict==5.1.0", # Avoid 5.2.0 due to Avast
"aiofiles==0.7.0", # Async IO for files
"blspy==1.0.9", # Signature library
"chiavdf==1.0.5", # timelord and vdf verification
"chiabip158==1.1", # bip158-style wallet filters
"chiapos==1.0.9", # proof of space
"clvm==0.9.7",
"clvm_rs==0.1.19",
"clvm_tools==0.4.3",
"aiohttp==3.7.4", # HTTP server for full node rpc
"aiosqlite==0.17.0", # asyncio wrapper for sqlite, to store blocks
"bitstring==3.1.9", # Binary data management library
"colorama==0.4.4", # Colorizes terminal output
"colorlog==5.0.1", # Adds color to logs
"concurrent-log-handler==0.9.19", # Concurrently log and rotate logs
"cryptography==3.4.7", # Python cryptography library for TLS - keyring conflict
"fasteners==0.16.3", # For interprocess file locking
"filelock==3.4.2", # For reading and writing config multiprocess and multithread safely (non-reentrant locks)
"keyring==23.0.1", # Store keys in MacOS Keychain, Windows Credential Locker
"keyrings.cryptfile==1.3.4", # Secure storage for keys on Linux (Will be replaced)
# "keyrings.cryptfile==1.3.8", # Secure storage for keys on Linux (Will be replaced)
# See https://github.com/frispete/keyrings.cryptfile/issues/15
"PyYAML==5.4.1", # Used for config file format
"setproctitle==1.2.2", # Gives the littlelambocoin processes readable names
"sortedcontainers==2.4.0", # For maintaining sorted mempools
"websockets==8.1.0", # For use in wallet RPC and electron UI
# TODO: when moving to click 8 remove the pinning of black noted below
"click==7.1.2", # For the CLI
"dnspythonchia==2.2.0", # Query DNS seeds
"watchdog==2.1.6", # Filesystem event watching - watches keyring.yaml
"dnslib==0.9.17", # dns lib
"typing-extensions==4.0.1", # typing backports like Protocol and TypedDict
"zstd==1.5.0.4",
"packaging==21.0",
"wget==3.2", # Only for downloading peer node list
]
upnp_dependencies = [
"miniupnpc==2.2.2", # Allows users to open ports on their router
]
dev_dependencies = [
"build",
"pre-commit",
"pytest",
"pytest-asyncio>=0.18.1", # require attribute 'fixture'
"pytest-monitor; sys_platform == 'linux'",
"pytest-xdist",
"twine",
"isort",
"flake8",
"mypy",
# TODO: black 22.1.0 requires click>=8, remove this pin after updating to click 8
"black==21.12b0",
"aiohttp_cors", # For blackd
"ipython", # For asyncio debugging
"types-aiofiles",
"types-click",
"types-cryptography",
"types-pkg_resources",
"types-pyyaml",
"types-setuptools",
]
kwargs = dict(
name="littlelambocoin-blockchain",
description="Littlelambocoin blockchain full node, farmer, timelord, and wallet.",
url="https://littlelambocoinnetwork.org/",
license="Apache License",
python_requires=">=3.7, <4",
keywords="littlelambocoin blockchain node",
install_requires=dependencies,
extras_require=dict(
uvloop=["uvloop"],
dev=dev_dependencies,
upnp=upnp_dependencies,
),
packages=[
"build_scripts",
"littlelambocoin",
"littlelambocoin.cmds",
"littlelambocoin.clvm",
"littlelambocoin.consensus",
"littlelambocoin.daemon",
"littlelambocoin.full_node",
"littlelambocoin.timelord",
"littlelambocoin.farmer",
"littlelambocoin.harvester",
"littlelambocoin.introducer",
"littlelambocoin.plotters",
"littlelambocoin.plotting",
"littlelambocoin.pools",
"littlelambocoin.protocols",
"littlelambocoin.rpc",
"littlelambocoin.seeder",
"littlelambocoin.server",
"littlelambocoin.simulator",
"littlelambocoin.types.blockchain_format",
"littlelambocoin.types",
"littlelambocoin.util",
"littlelambocoin.wallet",
"littlelambocoin.wallet.puzzles",
"littlelambocoin.wallet.rl_wallet",
"littlelambocoin.wallet.cat_wallet",
"littlelambocoin.wallet.did_wallet",
"littlelambocoin.wallet.settings",
"littlelambocoin.wallet.trading",
"littlelambocoin.wallet.util",
"littlelambocoin.ssl",
"mozilla-ca",
],
entry_points={
"console_scripts": [
"littlelambocoin = littlelambocoin.cmds.littlelambocoin:main",
"littlelambocoin_wallet = littlelambocoin.server.start_wallet:main",
"littlelambocoin_full_node = littlelambocoin.server.start_full_node:main",
"littlelambocoin_harvester = littlelambocoin.server.start_harvester:main",
"littlelambocoin_farmer = littlelambocoin.server.start_farmer:main",
"littlelambocoin_introducer = littlelambocoin.server.start_introducer:main",
"littlelambocoin_crawler = littlelambocoin.seeder.start_crawler:main",
"littlelambocoin_seeder = littlelambocoin.seeder.dns_server:main",
"littlelambocoin_timelord = littlelambocoin.server.start_timelord:main",
"littlelambocoin_timelord_launcher = littlelambocoin.timelord.timelord_launcher:main",
"littlelambocoin_full_node_simulator = littlelambocoin.simulator.start_simulator:main",
]
},
package_data={
"littlelambocoin": ["pyinstaller.spec"],
"": ["*.clvm", "*.clvm.hex", "*.clib", "*.clinc", "*.clsp", "py.typed"],
"littlelambocoin.util": ["initial-*.yaml", "english.txt"],
"littlelambocoin.ssl": ["littlelambocoin_ca.crt", "littlelambocoin_ca.key", "dst_root_ca.pem"],
"mozilla-ca": ["cacert.pem"],
},
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
zip_safe=False,
)
if __name__ == "__main__":
setup(**kwargs) # type: ignore
|
py | b40bc68690dc8ff96afa64c5fa4c4ed7fab12c99 | from typing import Any, Dict, List, Set
from ..language import (
DocumentNode,
FragmentDefinitionNode,
FragmentSpreadNode,
OperationDefinitionNode,
SelectionSetNode,
Visitor,
visit,
)
__all__ = ["separate_operations"]
DepGraph = Dict[str, List[str]]
def separate_operations(document_ast: DocumentNode) -> Dict[str, DocumentNode]:
"""Separate operations in a given AST document.
This function accepts a single AST document which may contain many operations and
fragments and returns a collection of AST documents each of which contains a single
    operation as well as the fragment definitions it refers to.
"""
operations: List[OperationDefinitionNode] = []
dep_graph: DepGraph = {}
# Populate metadata and build a dependency graph.
for definition_node in document_ast.definitions:
if isinstance(definition_node, OperationDefinitionNode):
operations.append(definition_node)
elif isinstance(
definition_node, FragmentDefinitionNode
): # pragma: no cover else
dep_graph[definition_node.name.value] = collect_dependencies(
definition_node.selection_set
)
# For each operation, produce a new synthesized AST which includes only what is
# necessary for completing that operation.
separated_document_asts: Dict[str, DocumentNode] = {}
for operation in operations:
dependencies: Set[str] = set()
for fragment_name in collect_dependencies(operation.selection_set):
collect_transitive_dependencies(dependencies, dep_graph, fragment_name)
# Provides the empty string for anonymous operations.
operation_name = operation.name.value if operation.name else ""
# The list of definition nodes to be included for this operation, sorted
# to retain the same order as the original document.
separated_document_asts[operation_name] = DocumentNode(
definitions=[
node
for node in document_ast.definitions
if node is operation
or (
isinstance(node, FragmentDefinitionNode)
and node.name.value in dependencies
)
]
)
return separated_document_asts
def collect_transitive_dependencies(
collected: Set[str], dep_graph: DepGraph, from_name: str
) -> None:
"""Collect transitive dependencies.
    From a dependency graph, collects a list of transitive dependencies by recursing
    through the graph.
"""
if from_name not in collected:
collected.add(from_name)
immediate_deps = dep_graph.get(from_name)
if immediate_deps is not None:
for to_name in immediate_deps:
collect_transitive_dependencies(collected, dep_graph, to_name)
class DependencyCollector(Visitor):
dependencies: List[str]
def __init__(self) -> None:
super().__init__()
self.dependencies = []
self.add_dependency = self.dependencies.append
def enter_fragment_spread(self, node: FragmentSpreadNode, *_args: Any) -> None:
self.add_dependency(node.name.value)
def collect_dependencies(selection_set: SelectionSetNode) -> List[str]:
collector = DependencyCollector()
visit(selection_set, collector)
return collector.dependencies
|
py | b40bc711f1d87356c968a4f13a94e02a23fc056d | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
EnsurePythonVersion(0,0)
Exit(0)
""")
test.run()
test.write('SConstruct', """\
EnsurePythonVersion(2000,0)
Exit(0)
""")
test.run(status=2)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
py | b40bc71de161b3856c18e65b7706774d0b9a6b2f | import setuptools
from crossrefapiclient import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="crossref-api-client",
version=__version__,
author="awakenedhaki",
description="A small example package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/awakenedhaki/crossref-api-python-client",
packages=setuptools.find_packages(),
classifiers=[],
python_requires='>=3.6',
) |
py | b40bc7da0bee767d5445b9fef3ab59c44ffbcf2c | import asyncpg
import string
import random
import traceback
import re
import discord
import contextlib
import asyncio
import copy
from discord.ext import commands
from cogs.utils import Cog
from cogs.utils import OpenRobotFormatter
import traceback_with_variables as custom_traceback
class MissingButton(discord.ui.Button):
def __init__(
self,
error: commands.MissingRequiredArgument,
embed: discord.Embed,
*args,
**kwargs,
):
ctx = kwargs.pop("ctx")
super().__init__(*args, **kwargs)
self.error = error
self.embed = embed
self.ctx = ctx
async def callback(self, interaction: discord.Interaction):
ctx = self.ctx
param = self.error.param
m = f"Please enter your argument for `{param.name}`."
await interaction.response.edit_message(content=m, embed=None, view=None)
def check(m: discord.Message) -> bool:
return m.author == ctx.author and m.channel == ctx.channel
with contextlib.suppress(asyncio.TimeoutError):
message = await ctx.bot.wait_for("message", check=check, timeout=60)
new_message = copy.copy(ctx.message)
new_message.content += f" {message.content}"
await ctx.bot.process_commands(new_message)
class View(discord.ui.View):
async def on_timeout(self) -> None:
await self.message.delete()
class Error(Cog):
IGNORED_ERRORS = (commands.NotOwner, commands.CommandNotFound)
async def initiate_tb_pool(self):
await self.bot.wait_until_ready() # Db is initialted when the bot is ready, so....
if self.bot.tb_pool:
await self.bot.error.initiate()
async def generate_missing_required_argument(
self, ctx: commands.Context, error: commands.MissingRequiredArgument
):
command = ctx.command
param_name = error.param.name
signature = command.signature
sig_split = signature.split(" ")
end = None
spaces = 0
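        # Walk the signature tokens to find the one matching the missing parameter:
        # `spaces` counts the tokens before it and `end` is that count plus the token's
        # length; both feed the zero-width-space padding and '^' underline built below.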
for arg in sig_split:
if param_name == re.sub(r"<|>|\[|\]", "", arg) or param_name == arg[1:-1]:
end = spaces + len(arg)
break
spaces += 1
if end is None:
return None
signature += (
"\n"
+ "\u200b " * len(f"{ctx.prefix}{command.qualified_name} ")
+ "\u200b " * spaces
+ "^" * end
)
final_signature = f"{ctx.prefix}{command.qualified_name} {signature}"
return final_signature
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error: Exception):
if ctx.command == self.bot.get_command("jsk dbg"):
return
        if ctx.command and ctx.command.has_error_handler():
return
if cog := ctx.cog:
if cog.has_error_handler():
return
error = getattr(error, "original", error)
if isinstance(error, self.IGNORED_ERRORS):
return
elif isinstance(error, commands.CheckFailure) and ctx.cog == self.bot.get_cog(
"API"
):
return
elif isinstance(error, commands.MissingRequiredArgument):
signature = await self.generate_missing_required_argument(ctx, error)
if signature is None:
return await ctx.send(
f"Missing required argument: `{error.param.name}`. Maybe take a look at the help command by doing `{ctx.prefix}help {ctx.command.qualified_name}`."
)
embed = discord.Embed(color=self.bot.color)
embed.description = f"Missing required argument: `{error.param.name}`. Maybe take a look at the help command by doing `{ctx.prefix}help {ctx.command.qualified_name}`."
embed.set_footer(
text=f"Command invoked by: {ctx.author}", icon_url=ctx.author.avatar.url
)
embed.description += f"\n\n**Errored At:** ```prolog\n{signature}```"
view = View(timeout=60)
async def interaction_check(interaction: discord.Interaction):
if interaction.user != ctx.author:
await interaction.response.send_message(
"This is not your interaction!", ephemeral=True
)
return False
return True
view.interaction_check = interaction_check
view.add_item(
MissingButton(
error,
embed,
ctx=ctx,
style=discord.ButtonStyle.green,
label=f"Enter required argument for '{error.param.name}'",
)
)
view.message = await ctx.send(embed=embed, view=view)
return
report_channel = self.bot.get_channel(905631512467230790)
colored_tb = "\n".join(
custom_traceback.iter_exc_lines(
error,
fmt=custom_traceback.Format(
color_scheme=custom_traceback.ColorSchemes.common
),
)
)
etype = type(error)
trace = error.__traceback__
lines = traceback.format_exception(etype, error, trace)
original_traceback = "".join(lines)
pretty_traceback = "\n".join(
OpenRobotFormatter(no_color=True).format_exception(error)
)
paginator = commands.Paginator(max_size=4000, prefix="```py")
l = pretty_traceback.split("\n")
for i in l:
paginator.add_line(i)
error_id = ""
for i in range(random.randint(5, 50)):
error_id += random.choice(string.ascii_lowercase + string.digits)
has_set_author = False
if self.bot.tb_pool:
url = f"https://traceback.openrobot.xyz/{error_id}"
while True:
try:
await self.bot.error.create(
user_id=ctx.author.id,
error_id=error_id,
guild_id=(ctx.guild.id if ctx.guild else None),
channel_id=ctx.channel.id,
message_id=ctx.message.id,
message_jump_url=ctx.message.jump_url,
pretty_traceback=pretty_traceback,
original_traceback=original_traceback,
)
except asyncpg.exceptions._base.InterfaceError:
pass
else:
break
else:
url = discord.Embed.Empty
for page in paginator.pages:
embed = discord.Embed(color=self.bot.color, description=page)
if (
(not has_set_author)
and self.bot.tb_pool
and url is not discord.Embed.Empty
):
embed.set_author(name=f"ID: {error_id}", url=url)
has_set_author = True
await report_channel.send(embed=embed)
embed = discord.Embed(color=self.bot.color, title="Error:")
embed.description = f"```\n{error}```"
await ctx.send(embed=embed)
raise error
def setup(bot):
bot.add_cog(Error(bot))
|
py | b40bc7ea29ff372952534f9e5091296c49a5501d | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import options
import tornado.web
import tornado.httpclient
import tornado.gen
import json
import threading
import traceback
from topofetch import *
from jsonrpc import *
from microsrvurl import *
from test import *
import datetime
from base_handler import base_handler
from err import *
from tornado_swagger import swagger
import os
import os.path
from flow_sche_serv import *
from db_util import mysql_utils
swagger.docs()
class lsp_handler(base_handler):
'''
LSP CRUD operations:
- LSP status: creating(0), up(1), down(-1), missing(-2), deleting(2), deleted(3)
- On LSP creation request:
1. Create LSP in tunnel ms, with status 0.
2. Call controller ms to create LSP with callback.
- Fail: Call tunnel ms to delete the LSP and return error to the caller
- OK: save the user_data into tunnel ms and then return status to caller
3. On callback from controller:
- up: call tunnel ms to update the status to 1 and add 'path' data
- down: call tunnel ms to update the status to -1
            - creating: call controller ms to delete the LSP without callback (to avoid a zombie LSP occupying resources)
and call tunnel ms to update the status to -1.
- On LSP delete request:
1. Call controller ms to delete the LSP with callbacks.
- OK: call tunnel ms to update the LSP status to 2 and return response to the caller.
- Fail: Do nothing and return fault response to the caller.
2. On callback from controller:
- OK: call tunnel ms to change the status to 3
- Fail: update the tunnel status to 1 to allow callers to delete it again.
'''
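    # Editorial sketch: a readable legend for the integer status codes described in
    # the docstring above. The handler itself keeps passing plain integers around
    # and never reads this attribute; it is added purely for illustration.
    LSP_STATUS_NAMES = {0: 'creating', 1: 'up', -1: 'down', -2: 'missing',
                        2: 'deleting', 3: 'deleted'}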
def initialize(self):
super(lsp_handler, self).initialize()
self.subreq_tunnel_map = {'lsp_man_get_lsp': 'ms_tunnel_get_lsp',
'lsp_man_del_lsp': 'ms_tunnel_del_lsp',
'lsp_man_delete_lsp': 'ms_tunnel_del_lsp',
'lsp_man_update_lsp': 'ms_tunnel_update_lsp',
'lsp_man_add_lsp' : 'ms_tunnel_add_lsp',
'lsp_man_get_cust_by_lsp':'ms_tunnel_get_cust_by_lsp',
'lsp_man_get_lsp_by_cust':'ms_tunnel_get_lsp_by_cust'
}
self.subreq_ctrler_map = {'lsp_man_get_lsp': 'ms_controller_get_lsp',
'lsp_man_del_lsp': 'ms_controller_del_lsp',
'lsp_man_delete_lsp': 'ms_controller_del_lsp',
'lsp_man_update_lsp': 'ms_controller_update_lsp',
'lsp_man_add_lsp' : 'ms_controller_add_lsp'}
self.lsp_req_map = {'lsp_man_get_lsp' : self.get_lsp,
'lsp_man_del_lsp': self.update_or_delete_lsp,
'lsp_man_delete_lsp': self.update_or_delete_lsp,
'lsp_man_update_lsp': self.update_or_delete_lsp ,
'lsp_man_add_lsp' : self.add_lsp,
'lsp_man_cb_lsp' : self.cb_lsp,
'lsp_man_get_cust_by_lsp':self.get_cust_by_lsp,
'lsp_man_get_lsp_by_cust':self.get_lsp_by_cust,
'lsp_man_get_lsp_status':self.get_lsp_status}
self.log = 0
pass
def form_response(self, req):
resp = {}
resp['response'] = req['request']
resp['ts'] = req['ts']
resp['trans_id'] = req['trans_id']
resp['err_code'] = 0
resp['msg'] = ''
return resp
@tornado.gen.coroutine
def get_lsp_by_cust(self, req):
final_resp = {'err_code':-1, 'result':{}}
try:
# Get lsp uids of each customer
custs = req['args']['cust_uids']
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'], self.subreq_tunnel_map[req['request']], req['args'])
lsp_uids = {}
if resp is not None and 'result' in resp:
lsp_uids = resp['result']
lsp_dict = {}
for c in lsp_uids:
for lsp in lsp_uids[c]:
lsp_dict[lsp['lsp_uid']] = None
#get lsp details
resp2 = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'], self.subreq_tunnel_map['lsp_man_get_lsp'],
{'lsp_uids':lsp_dict.keys()})
lsps = resp2['result']['lsps']
lsp_map = {}
for p in lsps:
lsp_map[p['uid']] = p
#Aggregate data
res = {}
for cust_uid in custs:
lsp_list = []
if cust_uid in lsp_uids:
for p in lsp_uids[cust_uid]:
lsp_list.append(lsp_map[p['lsp_uid']])
res[cust_uid] = lsp_list
final_resp['err_code'] = 0
final_resp['result'] = res
except (TypeError,LookupError):
traceback.print_exc()
pass
raise tornado.gen.Return(final_resp)
@tornado.gen.coroutine
def get_lsp_status(self, req):
final_resp = {'err_code':-1, 'result':{}}
try:
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'],'ms_tunnel_get_lsp_by_uids', req['args'])
lsps = resp['result']
res = {}
for k in lsps:
p = lsps[k]
res[k] = p['status']
final_resp['err_code'] = 0
final_resp['result'] = res
except (LookupError, KeyError):
traceback.print_exc()
pass
raise tornado.gen.Return(final_resp)
@tornado.gen.coroutine
def get_cust_by_lsp(self,req):
final_resp = {'err_code':-1, 'result':{}}
try:
lsps = req['args']['lsps']
if 'from_router_uid' not in lsps[0]:
'ingress node is missing in request.'
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'],'ms_tunnel_get_lsp_by_uids',
{'lsp_uids':[x['uid'] for x in lsps]})
# A map of {uid: {lsp obj}}
lsp_detail = resp['result']
for p in lsps:
if p['uid'] in lsp_detail:
p['from_router_uid'] = lsp_detail[p['uid']]['from_router_uid']
pass
lsp_uids = [p['uid'] for p in lsps]
lsp_map = {}
for p in lsps:
lsp_map[p['uid']] = p
# Get customer uids with input lsp_uids
args = {'lsp_uids':lsp_uids}
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'], self.subreq_tunnel_map[req['request']], args)
if resp['err_code'] != MS_OK:
raise tornado.gen.Return(final_resp)
# form a customer list.
custs = resp['result']
cust_dict = {}
for lsp in lsp_uids:
if lsp in custs:
for c in custs[lsp]:
cust_dict[c] = None
# Call customer ms to get detail info.
if len(cust_dict) == 0:
final_resp['err_code'] = 0
raise tornado.gen.Return(final_resp)
resp = yield self.do_query(microsrvurl_dict['microsrv_cust_url'], 'ms_cust_get_customer', {'uids':cust_dict.keys()})
res = resp['result']['customers']
for c in res:
cust_dict[c['uid']] = c
# Get current bitrates of each ingress nodes
#----------------------------------------------------------------------------------
ingress_uids = [p['from_router_uid'] for p in lsps]
flow_resp = yield self.do_query(microsrvurl_dict['microsrv_flow_url'], 'ms_flow_get_flow', {'ingress_uids':[ingress_uids]})
flow_resp = flow_resp['result']
#resp is a map of ingress_uid:flows
# Form the IP list to match customer.
ips = {}
for p in flow_resp:
for f in flow_resp[p]:
ips[f['src']] = None
# call customer ms to convert ips to customers
cust_match = yield self.do_query(microsrvurl_dict['microsrv_cust_url'], 'ms_cust_get_customer_by_ip', {"ips":ips.keys()})
ip_custs = cust_match['result']
#Sum up the flow bps by customers
cust_bps={}
for p in flow_resp:
for f in flow_resp[p]:
ip = f['src']
if ip in ip_custs:
cust = ip_custs[ip]['cust_uid']
bps = f['bps']
if cust in cust_bps:
cust_bps[cust] = int(cust_bps[cust]) + int(bps)
else:
cust_bps[cust] = int(bps)
# Set bps to customers
for cust in cust_bps:
if cust in cust_dict:
c = cust_dict[cust]
c['bps'] = cust_bps[cust]
#------------------------------------------------------------------------------------
# Aggregate the info.
for lsp in lsp_uids:
if lsp in custs:
cs = [cust_dict[c] for c in custs[lsp]]
# Sum up bps of the LSP.
used = 0.0
for c in cs:
if 'bps' in c:
used += float(c['bps'])
perc = 100.0 * used / lsp_map[lsp]['bandwidth'] if 'bandwidth' in lsp_map[lsp] else 0
custs[lsp] = {'flows':cs, 'ratio':perc, 'bps':used}
final_resp['err_code'] = 0
final_resp['result'] = custs
except (TypeError,LookupError):
traceback.print_exc()
pass
raise tornado.gen.Return(final_resp)
@tornado.gen.coroutine
def get_lsp(self, req):
' Get all lsps from tunnel micro service. No interface with controller '
resp = {'err_code':-1, 'result':{}}
try:
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'],self.subreq_tunnel_map[req['request']], req['args'] )
# Change path into detail information for convenience of view
equips = self.application.equips
lsps = resp['result']['lsps']
for p in lsps:
uid = p['uid']
if 'from_router_uid' in p:
eid = p['from_router_uid']
if eid in equips:
e = equips[eid]
p['from_router_name'] = '' if 'name' not in e else e['name']
if 'to_router_uid' in p:
eid = p['to_router_uid']
if eid in equips:
e = equips[eid]
p['to_router_name'] = '' if 'name' not in e else e['name']
if 'path' not in p:
continue
path = p['path']
detail_path = []
for e_uid in path:
if e_uid in equips:
detail_path.append(equips[e_uid])
else:
detail_path.append({})
p['path'] = detail_path
except (LookupError, TypeError):
traceback.print_exc()
pass
raise tornado.gen.Return(resp)
pass
@tornado.gen.coroutine
def add_lsp(self, req):
' add a lsp '
final_resp = {'err_code':-1, 'result':{}}
try:
# call tunnel service to add a temporary lsp with status 0
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'], self.subreq_tunnel_map[req['request']], req['args'])
res = resp['result']
if 'uid' not in res:
resp['err_code'] = -1
raise tornado.gen.Return(resp)
uid = res['uid']
# call controller service to add tunnel
rpc = base_rpc('')
req['args']['uid'] = uid
req['args']['callback'] = 'lsp_man_cb_lsp'
resp = yield self.do_query(microsrvurl_dict['microsrv_controller_url'], self.subreq_ctrler_map[req['request']], req['args'])
if resp['err_code'] != MS_OK:
' Error occurs, Delete the LSP from tunnel ms '
args = {}
stat = 1
args['uid'] = uid
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'], self.subreq_tunnel_map['lsp_man_delete_lsp'], args)
resp['err_code'] = -1
raise tornado.gen.Return(resp)
# The LSP setup is in progress. Save the possible user_data(cookie) from controller.
if 'user_data' in resp:
rpc = base_rpc('')
args = {'uid':uid, 'user_data':resp['user_data']}
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'], self.subreq_ctrler_map['lsp_man_update_lsp'], args)
final_resp = {'err_code':0}
result = {'uid':uid, 'status':0}
final_resp['result'] = result
except (LookupError, TypeError):
traceback.print_exc()
pass
raise tornado.gen.Return(final_resp)
pass
@tornado.gen.coroutine
def update_or_delete_lsp(self,req):
final_resp = {'err_code':-1, 'result':{}}
try:
#Get user_data from tunnel ms
rpc = base_rpc('')
args = {'lsp_uids':[req['args']['uid']]}
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'], self.subreq_tunnel_map['lsp_man_get_lsp'], args)
res = {}
            if resp is not None and 'result' in resp and 'lsps' in resp['result'] and len(resp['result']['lsps']) > 0:
res = resp['result']['lsps'][0]
user_data = None
if 'user_data' in res:
user_data = res['user_data']
if 'lsp_man_del_lsp' == req['request']:
resp = yield self.do_query(microsrvurl_dict['te_flow_sched_url'], 'flow_sched_del_lsp_flow', {'lsp_uid':req['args']['uid']})
# call controller service to update tunnel
rpc = base_rpc('')
if user_data:
req['args']['user_data'] = user_data
req['args']['callback'] = 'lsp_man_cb_lsp'
resp = yield self.do_query(microsrvurl_dict['microsrv_controller_url'], self.subreq_ctrler_map[req['request']], req['args'])
res = resp if resp is not None else {}
if 'user_data' in res:
user_data = res['user_data']
if res['err_code'] == MS_OK:
rpc = base_rpc('')
err = 0
req['args']['user_data'] = user_data
up_args = req['args']
if 'lsp_man_update_lsp' == req['request']:
pass
else:
'LSP delete'
up_args['status'] = 2 #Deleting
final_resp['result'] = {'uid':req['args']['uid'], 'status':2}
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'],self.subreq_tunnel_map['lsp_man_update_lsp'], req['args'] )
elif res['err_code'] == MS_DELETE_NON_EXISTING:
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'], self.subreq_tunnel_map['lsp_man_del_lsp'],
{'uid':req['args']['uid']})
else:
raise tornado.gen.Return(final_resp)
# flow_add_tag = False
# db = mysql_utils('topology')
# db.exec_sql('Update flag set flow_add_flag = 0 where id = 1')
# db.commit()
# db.close()
final_resp['err_code'] = 0
except (LookupError,TypeError):
traceback.print_exc()
pass
raise tornado.gen.Return(final_resp)
@tornado.gen.coroutine
def cb_lsp(self,req):
final_resp = {'err_code':-1, 'result':{}}
# Map for transitioning the callback status.
# 1. If the controller calls back with status creating(0), the LSP setup failed (timeout), so
# change the status to down(-1) to let the user handle it.
# 2. If the controller calls back with status deleting(2), the LSP deletion failed (timeout); also
# change the status to down(-1).
# 3. Other values (up, deleted or a real down - these are stable statuses) keep their original value.
status_map = {0:-1, 2:-1}
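# e.g. a callback carrying {'uid': u, 'status': 0} (setup timed out) is written back to the
# tunnel ms with status -1 (down); any status not in the map is passed through unchanged.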
try:
args = req['args']
if 'name' in args:
args.pop('name') #Don't let controller update the LSP name in Tunnel ms.
print 'Callback:\n' + str(args)
status = args['status']
if status in status_map:
status = status_map[status]
up_args = {'uid':args['uid'], 'status':status}
if 'user_data' in args:
up_args['user_data'] = args['user_data']
if 'path' in args:
up_args['path'] = args['path']
resp = yield self.do_query(microsrvurl_dict['microsrv_tunnel_url'], self.subreq_tunnel_map['lsp_man_update_lsp'], up_args)
if resp['err_code'] != 0:
raise tornado.gen.Return(final_resp)
final_resp['err_code'] = 0
except (LookupError, TypeError):
traceback.print_exc()
pass
raise tornado.gen.Return(final_resp)
def get(self):
self.write('Not allowed')
return
@tornado.gen.coroutine
def post(self):
try:
ctnt = self.request.body
req = json.loads(str(ctnt))
self.req = req
resp = self.form_response(req)
res = None
if 'request' not in req or req['request'] not in self.lsp_req_map:
resp['err_code'] = -1
resp['msg'] = 'Unrecognised method'
self.write(json.dumps(resp))
self.finish()
return
#resp = yield tornado.gen.Task(self.lsp_req_map[req['request']], req)
lsp_resp = yield self.lsp_req_map[req['request']](req)
resp['result'] = lsp_resp['result']
resp['err_code'] = lsp_resp['err_code']
self.write(json.dumps(resp))
self.finish()
except Exception, data:
print str(Exception) + str(data)
self.write('Internal Server Error')
self.finish()
traceback.print_exc()
pass
def remove_lsp(id):
rpc = base_rpc(microsrvurl_dict['microsrv_tunnel_url'])
rpc.form_request('ms_tunnel_del_lsp', dict(uid=id))
rpc.do_sync_post()
pass
def sync_lsp(*args, **kwargs):
''
app = args[0]
#Get equips data from topo ms
rpc = base_rpc(microsrvurl_dict['microsrv_topo_url'])
req = rpc.form_request('ms_topo_get_equip',{})
r = rpc.do_sync_post()
es = r['routers'] #an array of equip
em = {}
for e in es:
if 'ports' in e:
e.pop('ports')
em[e['uid']] = e
app.set_attrib('equips', em)
rpc = base_rpc(microsrvurl_dict['microsrv_controller_url'])
args = {}
args['equips'] = es
rpc.form_request('ms_controller_set_equips', args)
r = rpc.do_sync_post()
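# NOTE: the unconditional return below exits after pushing the equips to the controller,
# so the tunnel/controller LSP reconciliation that follows (and the re-armed 60 s timer)
# never runs as written.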
return
#Get current LSPs from tunnel ms
rpc = base_rpc(microsrvurl_dict['microsrv_tunnel_url'])
req = rpc.form_request('ms_tunnel_get_lsp', {})
r = rpc.do_sync_post()
t_lsps = r['lsps'] if 'lsps' in r else {}
t_map = {}
for lp in t_lsps:
t_map[str(lp['user_data'])] = lp
#Get LSPs from controller ms
rpc = base_rpc(microsrvurl_dict['microsrv_controller_url'])
req = rpc.form_request('ms_controller_get_lsp', {})
r = rpc.do_sync_post()
c_lsps = r['lsps'] if 'lsps' in r else {}
c_map = {}
for cl in c_lsps:
c_map[str(cl['user_data'])] = cl
#Compare result and update lsps in tunnel ms
for tl in t_lsps:
if str(tl['user_data']) not in c_map:
'delete the lsp'
remove_lsp(tl['uid'])
else:
'Update the status if not the same'
c_lsp = c_map[str(tl['user_data'])]
if tl['status'] != c_lsp['status']:
rpc = base_rpc(microsrvurl_dict['microsrv_tunnel_url'])
tl['status'] = c_lsp['status']
rpc.form_request('ms_tunnel_update_lsp', tl)
r = rpc.do_sync_post()
for clsp in c_lsps:
if str(clsp['user_data']) not in t_map:
rpc = base_rpc(microsrvurl_dict['microsrv_tunnel_url'])
rpc.form_request('ms_tunnel_add_lsp', clsp)
r = rpc.do_sync_post()
pass
tornado.ioloop.IOLoop.instance().add_timeout(
datetime.timedelta(milliseconds=60*1000),
sync_lsp, app)
pass
class lsp_app(tornado.web.Application):
def __init__(self, other_app):
self.other_app = other_app
handlers = [
(r'/', lsp_handler),
]
settings = {
'template_path': 'templates',
'static_path': 'static'
}
tornado.web.Application.__init__(self, handlers, **settings)
self.equips = {}
tornado.ioloop.IOLoop.instance().add_timeout(
datetime.timedelta(milliseconds=1000),
sync_lsp, self)
pass
def set_attrib(self, name, val):
if name in self.__dict__:
object.__setattr__(self, name, val)
if self.other_app:
object.__setattr__(self.other_app, name, val)
pass
@swagger.model()
class lsp(object):
"""
@description:
LSP model
@property hop_list: Desired hop list of the LSP. Each item of the list is the node uid.
@ptype hop_list: C{list} of L{String}
"""
def __init__(self, ingress_node_id, egress_node_id, ingress_node_name, egress_node_name, bandwidth, hop_list, uid = None, path=None):
self.ingress_node_id = ingress_node_id
self.egress_node_id = egress_node_id
self.ingress_node_name = ingress_node_name
self.egress_node_name = egress_node_name
self.bandwidth = bandwidth
self.hop_list = hop_list
self.uid = uid
self.path = path
class lsp_post_handler(lsp_handler):
@tornado.gen.coroutine
@swagger.operation(nickname='add_lsp')
def post(self):
"""
@param body: create an LSP
@type body: L{lsp}
@in body: body
@return 200: LSP was created.
@raise 500: invalid input
@description: Add a new LSP
@notes: POST lsp/
<br /> request body sample <br />
{"hop_list": ["2", "6"], "ingress_node_uid": "2", "ingress_node_name": "", "lsp_name": "alu_2_6_lsp", "egress_node_uid": "6", "priority": null, "bandwidth": 100.0, "delay": null, "egress_node_name": ""}
"""
p = json.loads(self.request.body)
np = {}
rev_map = {}
for k in self.application.lsp_attrib_map:
rev_map[self.application.lsp_attrib_map[k]] = k
for k in p:
if k in rev_map:
np[rev_map[k]] = p[k]
else:
np[k] = p[k]
rpc = base_rpc('')
req = rpc.form_request('lsp_man_add_lsp', np)
resp = yield self.add_lsp(req)
result = resp['result']
rest_resp = {'lsp_uid':result['uid'] if 'uid' in result else '', 'status':result['status'] if 'status' in result else ''}
self.write(json.dumps(rest_resp))
self.finish()
pass
@tornado.gen.coroutine
@swagger.operation(nickname='update_lsp')
def put(self):
"""
@param body: update an LSP
@type body: L{lsp}
@in body: body
@rtype: {}
@description: Update an LSP. Only the LSP bandwidth can be updated in the current version.
@notes: PUT lsp/
"""
p = json.loads(self.request.body)
id = p['uid']
bw = p['bandwidth']
rpc = base_rpc('')
req = rpc.form_request('lsp_man_update_lsp', {'uid':id, 'bandwidth':bw})
resp = yield self.update_or_delete_lsp(req)
self.write('')
self.finish()
pass
@tornado.gen.coroutine
@swagger.operation(nickname='get_all_lsp')
def get(self):
"""
@rtype: list of lsp
Example:<br />
{"lsps": [{"uid": "lsp_0", "ingress_node_name": "", "egress_node_name": "", "bandwidth": 1000, "ingress_node_uid": "100", "egress_node_uid": "102", "lsp_name": "vip_lsp1", "path":["100","101", "102"] , "user_data":"xxx"}]}
<br /> <br />
lsp_name: The name of an LSP <br />
ingress_node_name: name of the ingress node (Get from BRS). <br />
ingress_node_uid: unique id of ingress node.<br />
egress_node_name: name of the egress node <br />
egress_node_uid: unique id of egress node. <br />
uid: unique id of the LSP <br />
path: A list of node uids that the LSP traverses in sequence. <br />
user_data: opaque context data of the LSP. It will be used at manipulation of the LSP. <br />
bandwidth: Configured LSP capacity in Mbps
@description: Get LSP information; returns all available LSPs.
@notes: GET lsp/
"""
rpc = base_rpc('')
req = rpc.form_request('lsp_man_get_lsp', {})
resp = yield self.get_lsp(req)
lsps = resp['result']['lsps']
val = []
for p in lsps:
np = self.map_obj_key(p, self.application.lsp_attrib_map)
val.append(np)
rest_resp = {'lsps':val}
self.write(json.dumps(rest_resp))
self.finish()
class lsp_get_handler(lsp_handler):
@tornado.gen.coroutine
@swagger.operation(nickname='delete_lsp')
def delete(self, lsp_uid):
"""
@param lsp_uid:
@type lsp_uid: L{string}
@in lsp_uid: path
@required lsp_uid: True
@rtype: list of lsp
@description: Delete an LSP
@notes: DELETE lsp/uid
"""
rpc = base_rpc('')
req = rpc.form_request('lsp_man_del_lsp', {'uid':lsp_uid})
resp = yield self.update_or_delete_lsp(req)
if resp['err_code'] == 0:
self.write('')
else:
raise tornado.web.HTTPError(500)
pass
@tornado.gen.coroutine
@swagger.operation(nickname='get_lsp')
def get(self, ingress_uid):
"""
@param ingress_uid:
@type ingress_uid: L{string}
@in ingress_uid: path
@required ingress_uid: True
@rtype: list of lsp
Example:<br />
{"lsps": [{"uid": "lsp_0", "ingress_node_name": "", "egress_node_name": "", "bandwidth": 1000, "ingress_node_uid": "100", "egress_node_uid": "102", "lsp_name": "vip_lsp1", "path":["100","101", "102"] , "user_data":"xxx"}]}
<br /> <br />
lsp_name: The name of an LSP <br />
ingress_node_name: name of the ingress node (Get from BRS). <br />
ingress_node_uid: unique id of ingress node.<br />
egress_node_name: name of the egress node <br />
egress_node_uid: unique id of egress node. <br />
uid: unique id of the LSP <br />
path: A list of node uids that the LSP traverses in sequence. <br />
user_data: opaque context data of the LSP. It will be used at manipulation of the LSP. <br />
bandwidth: Configured LSP capacity in Mbps
@description: Get LSP information. If ingress_node_uid is present, return the LSPs that start from the desired node;
otherwise, return all available LSPs.
@notes: GET lsp/uid or GET lsp/
"""
args = {} if not ingress_uid else {'from_router_uid':str(ingress_uid)}
rpc = base_rpc('')
req = rpc.form_request('lsp_man_get_lsp', args)
resp = yield self.get_lsp(req)
if 'result' not in resp:
self.write('{}')
self.finish()
return
# Field name conversion.
lsps = resp['result']['lsps']
val = []
for p in lsps:
np = self.map_obj_key(p, self.application.lsp_attrib_map)
val.append(np)
rest_resp = {'lsps':val}
self.write(json.dumps(rest_resp))
self.finish()
pass
class lsp_vsite_handler(lsp_handler):
@tornado.gen.coroutine
@swagger.operation(nickname='get_lsp_by_vsite')
def get(self, vsite_uid):
"""
@param vsite_uid:
@type vsite_uid: L{string}
@in vsite_uid: path
@required vsite_uid: True
@rtype: map of {vsite_uid:[L{lsp}]}
@description: Get the LSPs that carry the flow specs of the given vsites
@notes: GET lsp/vsite/{uid}
"""
vsites = vsite_uid.split(',')
rpc = base_rpc('')
req = rpc.form_request('lsp_man_get_lsp_by_cust', {'cust_uids':vsites})
resp = yield self.get_lsp_by_cust(req)
cust_lsps = resp['result']
vsite_lsp = {}
for c in cust_lsps:
r_lsps = []
for p in cust_lsps[c]:
r_p = self.map_obj_key(p, self.application.lsp_attrib_map)
r_lsps.append(r_p)
vsite_lsp[c] = r_lsps
self.write(json.dumps(vsite_lsp))
self.finish()
pass
class vsite_lsp_handler(lsp_handler):
@tornado.gen.coroutine
@swagger.operation(nickname='get_vsite_by_lsp')
def get(self, lsp_uid):
"""
@param lsp_uid:
@type lsp_uid: L{string}
@in lsp_uid: path
@required lsp_uid: True
@rtype: map of {lsp_uid:[list of vsite]}
@description: Get the vsite flow specs in the LSP.
@notes: GET vsite/lsp/{lsp_uids}
"""
rpc = base_rpc('')
req = rpc.form_request('lsp_man_get_cust_by_lsp', {'lsps':[{'uid':x} for x in lsp_uid.split(',')]})
resp = yield self.get_cust_by_lsp(req)
self.write(resp['result'])
self.finish()
pass
class vsite_flow_policy_handler(flow_sched_handler):
@tornado.gen.coroutine
@swagger.operation(nickname='create_flow_policy')
def post(self):
"""
@param body: body
@type body: Json
@in body: body
@return 200: flow policy was created.
@raise 500: invalid input
@description: Create a new flow policy to schedule the flow spec of a vsite onto a specific LSP.
@notes: POST flow-policy
<br /> Request body sample <br />
{"lsp_uid": "xxx", "vsite_uid": "yyy"}
"""
rpc = base_rpc('')
rest_req = json.loads(self.request.body)
req = rpc.form_request('flow_sched_add_flow', {'lsp_uid': rest_req['lsp_uid'], 'cust_uid':rest_req['vsite_uid']})
resp = yield self.add_flow(req)
self.write('')
self.finish()
pass
@tornado.gen.coroutine
@swagger.operation(nickname='delete_flow_policy')
def delete(self):
"""
@param lsp_uid:
@type lsp_uid: L{string}
@in lsp_uid: query
@required lsp_uid: True
@param vsite_uid:
@type vsite_uid: L{string}
@in vsite_uid: query
@required vsite_uid: True
@return 200: flow policy was deleted.
@raise 500: invalid input
@description: Delete a flow policy.
@notes: DELETE flow-policy?lsp_uid=xxx&vsite_uid=yyy
"""
rpc = base_rpc('')
lsp = self.get_argument('lsp_uid')
vsite = self.get_argument('vsite_uid')
req = rpc.form_request('flow_sched_del_flow', {'lsp_uid': lsp, 'cust_uid':vsite})
resp = yield self.del_flow(req)
self.write('')
self.finish()
pass
def openo_related_service_query():
#{"protocol": "REST", "url": "/openoapi/sdnovsitemgr/v1", "visualRange": 1, "version": "v1", "serviceName": "vsite_mgr", "nodes": [{"ip": "127.0.0.1", "port": 8600, "ttl": 0}]}
# print('customer_url---:' + microsrv_cust_url)
customer_server_resp = openo_query_service('vsite_mgr', 'v1')
# microsrv_cust_url = 'http://127.0.0.1:33771/'
if customer_server_resp is not None and 'nodes' in customer_server_resp:
for item in customer_server_resp['nodes']:
if 'ip' in item:
microsrvurl_dict['microsrv_cust_url'] = 'http://' + item['ip'] + ':33771'
break
# print('customer_url+++:' + microsrv_cust_url)
#{"protocol": "REST", "url": "/openoapi/sdnomonitoring/v1", "visualRange": 1, "version": "v1", "serviceName": "link_flow_monitor", "nodes": [{"ip": "127.0.0.1", "port": 8610, "ttl": 0}]}
# print('te_topo_man_url---:' + te_topo_man_url)
topo_serv_resp = openo_query_service('link_flow_monitor', 'v1')
#te_topo_man_url = 'http://127.0.0.1:32769'
if topo_serv_resp is not None and 'nodes' in topo_serv_resp:
for item in topo_serv_resp['nodes']:
if 'ip' in item:
microsrvurl_dict['te_topo_man_url'] = 'http://' + item['ip'] + ':32769'
break
# print('te_topo_man_url+++:' + te_topo_man_url)
#{"driverInfo": {"protocol": "REST", "instanceID": "sdno-driver-ct-te_ID", "ip": "127.0.0.1", "driverName": "sdno-driver-ct-te", "services": [{"support_sys": [{"version": "v1", "type": "ct_te_driver"}], "service_url": "/openoapi/sdno-driver-ct-te/v1/"}], "port": 8670}}
# print('microsrv_controller_url---:' + microsrv_controller_url)
ms_controller_resp = openo_query_driver('sdno-driver-ct-te', 'sdno-driver-ct-te_ID', 'v1')
# microsrv_controller_url = 'http://10.9.63.140:12727/'
if ms_controller_resp is not None:
for item in ms_controller_resp:
if 'driverName' in item and 'sdno-driver-ct-te' == item['driverName']:
if 'ip' in item:
microsrvurl_dict['microsrv_controller_url'] = 'http://' + item['ip'] + ':12727'
break
# print('microsrv_controller_url+++:' + microsrv_controller_url)
pass
class swagger_app(swagger.Application):
def __init__(self):
settings = {
'static_path': os.path.join(os.path.dirname(__file__), 'sdnooptimize.swagger')
}
handlers = [(r'/openoapi/sdnooptimize/v1/lsp/([^/]+)', lsp_get_handler),
(r'/openoapi/sdnooptimize/v1/lsp', lsp_post_handler),
(r'/openoapi/sdnooptimize/v1/lsp/vsite/([^/]+)', lsp_vsite_handler),
(r'/openoapi/sdnooptimize/v1/vsite/lsp/([^/]+)', vsite_lsp_handler),
(r'/openoapi/sdnooptimize/v1/flow-policy', vsite_flow_policy_handler),
(r'/openoapi/sdnooptimize/v1/(swagger.json)', tornado.web.StaticFileHandler, dict(path=settings['static_path']))
]
super(swagger_app, self).__init__(handlers, **settings)
self.equips = {}
self.lsp_attrib_map = {'from_router_uid':'ingress_node_uid', 'to_router_uid':'egress_node_uid',
'bandwidth':'bandwidth', 'from_router_name':'ingress_node_name',
'to_router_name':'egress_node_name', 'name':'lsp_name'
}
tornado.ioloop.IOLoop.instance().add_timeout(
datetime.timedelta(milliseconds=500),
openo_register, 'mpls-optimizer', 'v1', '/openoapi/sdnooptimize/v1',
microsrvurl_dict['te_lsp_rest_host'], microsrvurl_dict['te_lsp_rest_port'] )
tornado.ioloop.IOLoop.instance().add_timeout(
datetime.timedelta(milliseconds=1000), openo_related_service_query)
def strip_parse_from_argv():
options.define("uniq", default="2837492392992775", help="service unique id")
options.define("localurl", default=microsrvurl_dict['te_lsp_rest_host'] + te_host_port_divider + str(microsrvurl_dict['te_lsp_rest_port']), help="service host:port")
options.define("msburl", default=microsrvurl_dict['te_msb_rest_host'] + te_host_port_divider + str(microsrvurl_dict['te_msb_rest_port']), help="micro service bus host:port")
tornado.options.parse_command_line()
microsrvurl_dict['te_lsp_rest_host'] = options.localurl.split(':')[0]
microsrvurl_dict['te_lsp_rest_port'] = int(options.localurl.split(':')[1])
microsrvurl_dict['openo_ms_url'] = te_protocol + options.msburl + openo_ms_url_prefix
microsrvurl_dict['openo_dm_url'] = te_protocol + options.msburl + openo_dm_url_prefix
microsrvurl_dict['openo_esr_url'] = te_protocol + options.msburl + openo_esr_url_prefix
microsrvurl_dict['openo_brs_url'] = te_protocol + options.msburl + openo_brs_url_prefix
pass
if __name__ == '__main__':
strip_parse_from_argv()
swag = swagger_app() # For REST interface
app = lsp_app(swag)
server = tornado.httpserver.HTTPServer(app)
server.listen(32775)
server_swag = tornado.httpserver.HTTPServer(swag)
server_swag.listen(microsrvurl_dict['te_lsp_rest_port'])
tornado.ioloop.IOLoop.instance().start()
|
py | b40bc8730e60fdd98066da4e26353dbfa419a176 | from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import IntegrityError, models, router, transaction
from django.utils.text import slugify
from django.utils.translation import gettext, gettext_lazy as _
try:
from unidecode import unidecode
except ImportError:
def unidecode(tag):
return tag
class TagBase(models.Model):
name = models.CharField(verbose_name=_("Name"), max_length=100)
slug = models.SlugField(verbose_name=_("Slug"), unique=True, max_length=100)
def __str__(self):
return self.name
def __gt__(self, other):
return self.name.lower() > other.name.lower()
def __lt__(self, other):
return self.name.lower() < other.name.lower()
class Meta:
abstract = True
def save(self, *args, **kwargs):
if self._state.adding and not self.slug:
self.slug = self.slugify(self.name)
using = kwargs.get("using") or router.db_for_write(
type(self), instance=self
)
# Make sure we write to the same db for all attempted writes,
# with a multi-master setup, theoretically we could try to
# write and rollback on different DBs
kwargs["using"] = using
# Be opportunistic and try to save the tag; this should work for
# most cases ;)
try:
with transaction.atomic(using=using):
res = super().save(*args, **kwargs)
return res
except IntegrityError:
pass
# Now try to find existing slugs with similar names
slugs = set(
self.__class__._default_manager.filter(
slug__startswith=self.slug
).values_list("slug", flat=True)
)
i = 1
while True:
slug = self.slugify(self.name, i)
if slug not in slugs:
self.slug = slug
# We purposely ignore concurrency issues here for now.
# (That is, till we found a nice solution...)
return super().save(*args, **kwargs)
i += 1
else:
return super().save(*args, **kwargs)
def slugify(self, tag, i=None):
slug = slugify(unidecode(tag))
if i is not None:
slug += "_%d" % i
return slug
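# e.g. self.slugify("Déjà Vu") -> "deja-vu", and the collision retry
# self.slugify("Déjà Vu", 2) -> "deja-vu_2".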
class Tag(TagBase):
class Meta:
verbose_name = _("Tag")
verbose_name_plural = _("Tags")
app_label = "taggit"
class ItemBase(models.Model):
def __str__(self):
return gettext("%(object)s tagged with %(tag)s") % {
"object": self.content_object,
"tag": self.tag,
}
class Meta:
abstract = True
@classmethod
def tag_model(cls):
field = cls._meta.get_field("tag")
return field.remote_field.model
@classmethod
def tag_relname(cls):
field = cls._meta.get_field("tag")
return field.remote_field.related_name
@classmethod
def lookup_kwargs(cls, instance):
return {"content_object": instance}
class TaggedItemBase(ItemBase):
tag = models.ForeignKey(
Tag, related_name="%(app_label)s_%(class)s_items", on_delete=models.CASCADE
)
class Meta:
abstract = True
@classmethod
def tags_for(cls, model, instance=None, **extra_filters):
kwargs = extra_filters or {}
if instance is not None:
kwargs.update({"%s__content_object" % cls.tag_relname(): instance})
return cls.tag_model().objects.filter(**kwargs)
kwargs.update({"%s__content_object__isnull" % cls.tag_relname(): False})
return cls.tag_model().objects.filter(**kwargs).distinct()
class CommonGenericTaggedItemBase(ItemBase):
content_type = models.ForeignKey(
ContentType,
on_delete=models.CASCADE,
verbose_name=_("Content type"),
related_name="%(app_label)s_%(class)s_tagged_items",
)
content_object = GenericForeignKey()
class Meta:
abstract = True
@classmethod
def lookup_kwargs(cls, instance):
return {
"object_id": instance.pk,
"content_type": ContentType.objects.get_for_model(instance),
}
@classmethod
def tags_for(cls, model, instance=None, **extra_filters):
tag_relname = cls.tag_relname()
kwargs = {
"%s__content_type__app_label" % tag_relname: model._meta.app_label,
"%s__content_type__model" % tag_relname: model._meta.model_name,
}
if instance is not None:
kwargs["%s__object_id" % tag_relname] = instance.pk
if extra_filters:
kwargs.update(extra_filters)
return cls.tag_model().objects.filter(**kwargs).distinct()
class GenericTaggedItemBase(CommonGenericTaggedItemBase):
object_id = models.IntegerField(verbose_name=_("Object id"), db_index=True)
class Meta:
abstract = True
class GenericUUIDTaggedItemBase(CommonGenericTaggedItemBase):
object_id = models.UUIDField(verbose_name=_("Object id"), db_index=True)
class Meta:
abstract = True
class TaggedItem(GenericTaggedItemBase, TaggedItemBase):
class Meta:
verbose_name = _("Tagged Item")
verbose_name_plural = _("Tagged Items")
app_label = "taggit"
index_together = [["content_type", "object_id"]]
unique_together = [["content_type", "object_id", "tag"]]
|
py | b40bc88be7d9975ca6ad22574a73918dc37e3371 | import os
import numpy as np
import time
import multiprocessing as mp
import csv
import socket
import datetime
import math
import glob
from pypushexp import PushSim
# # input - [recorded item]
# [weight] : 48
# [height] : 160
# [crouch_angle] (deg)
# [step_length_ratio]
# [halfcycle_duration_ratio]
# [push_step] : 8
# [push_duration] (sec) : .2
# [push_force] (N)
# [push_start_timing] (half gait cycle percent)
#
# # output
# [pushed_length] (m) : sim.out_pushed_length
# [pushed_steps] : sim.out_pushed_steps
# [push_strength] : abs(push_force * push_duration / weight)
# [step_length] (m) : sim.getPushedLength()
# [walking_speed] (m/s) : sim.getWalkingSpeed()
# [halfcycle_duration] (s) : sim.getStepLength() /sim.getWalkingSpeed()
#
# # output for hospital
# [distance] : pushed_length * 1000.
# [speed] : walking_speed * 1000.
# [force] : push_strength * 1000.
# [stride] : step_length * 1000.
# [start_timing_time_ic] = sim.start_timing_time_ic
# [mid_timing_time_ic] = sim.mid_timing_time_ic
# [start_timing_foot_ic] = sim.getStartTimingFootIC()
# [mid_timing_foot_ic] = sim.getMidTimingFootIC()
# [start_timing_time_fl] = sim.getStartTimingTimeFL()
# [mid_timing_time_fl] = sim.getMidTimingTimeFL()
# [start_timing_foot_fl] = sim.getStartTimingFootFL()
# [mid_timing_foot_fl] = sim.getMidTimingFootFL()
# # not used
# subject no
# sex
# left leg length
# right leg length
# stride
# speed
# experiment
# file name
# trial no
# push timing : 'left stance'
# push direction : 'from left'
# normalized push length
# push length until first step
# push end timing (time)
# push end timing (foot pos)
# return during first step
# push duration
# push start time
def gettimestringisoformat():
return datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
def worker_simulation(sim, param):
try:
push_step, push_duration,\
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,\
weight, height, ith, q = param
# print(int(crouch_angle), step_length_ratio, walk_speed_ratio, push_force, push_start_timing)
sim.setParamedStepParams(int(crouch_angle), step_length_ratio, walk_speed_ratio)
sim.setPushParams(8, 0.2, 0., 0.)
print(step_length_ratio, walk_speed_ratio)
stopcode = sim.simulate()
# stopcode = 0
if stopcode in [0, 3, 4]:
cot = sim.getCostOfTransport()
walking_speed = sim.getWalkingSpeed()
q.put((ith, crouch_angle, walking_speed, cot))
except IndexError:
pass
def write_start(csvfilepath):
csvfile = open(csvfilepath, 'w')
csvfile.write('type,ith,crouch_angle,speed,cot\n')
return csvfile
def write_body(q, csvfile):
while True:
try:
ith, crouch_angle, walking_speed, cot = q.get(False)
csvfile.write('torque,%d,%s,%s,%s\n' % (ith, crouch_angle, walking_speed, cot))
csvfile.flush()
except:
break
def write_end(csvfile):
csvfile.close()
def simulate(sim, launch_order, num, option_str=''):
#=======================================================================
# settings
#=======================================================================
TEST = True if launch_order is None else False
# TEST = True
# TEST = False
weight = 72
height = 170
push_step = 8
push_duration = .2
test_params = [] # element: (crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label)
# ===========================================================================
#
# ===========================================================================
if TEST:
# test
additional_str = ''
num = 2
# num = 5000
mean_crouch = [0, 20, 30, 60]
else:
# real
all_mean_crouch = [0, 20, 30, 60]
mean_crouch = [all_mean_crouch[launch_order % len(all_mean_crouch)]]
additional_str = '_%ddeg__push' % mean_crouch[0]
# if launch_order==0:
# param_opt_result = '130810_113234_0_60_push'
# additional_str = '_0_60_push'
# elif launch_order==2:
# param_opt_result = '130810_161152_0_30_60_push'
# additional_str = '_0_30_60_push'
# =======================================================================
# set logger
# =======================================================================
outDir = os.path.dirname(os.path.abspath(__file__)) + '/results/'
if not os.path.exists(outDir):
os.makedirs(outDir)
csvfilepath = outDir + 'COT_' +option_str + '_' + gettimestringisoformat() + '_' + str(num) + 'trials_' + socket.gethostname() + '.csv'
print('start logging at', gettimestringisoformat())
print()
print('<simulation setting>')
# =======================================================================
# test2 : multivariate normal distribution
# =======================================================================
stride_means = [1.1262070300, 0.9529737358, 0.9158506655, 0.8755451448]
speed_means = [0.9943359644, 0.8080297151, 0.7880050552, 0.7435198328]
stride_vars = [0.03234099289, 0.02508595114, 0.02772452640, 0.02817863267]
stride_speed_covars = [0.03779884365, 0.02225320798, 0.02906793442, 0.03000639027]
speed_vars = [0.06929309644, 0.04421889347, 0.04899931048, 0.05194827755]
# crouch angle
# mean_crouch = [0,20,30,60]
std_crouch = 1
# step length
motion_stride_bvh_after_default_param = 1.1886
experi_stride_mean = stride_means[launch_order]
experi_stride_std = math.sqrt(stride_vars[launch_order])
mean_length_ratio = experi_stride_mean / motion_stride_bvh_after_default_param
std_length_ratio = experi_stride_std / motion_stride_bvh_after_default_param
# walk speed
speed_bvh_after_default_param = 0.9134
experi_speed_mean = speed_means[launch_order]
experi_speed_std = math.sqrt(speed_vars[launch_order])
mean_speed_ratio = experi_speed_mean / speed_bvh_after_default_param
std_speed_ratio = experi_speed_std / speed_bvh_after_default_param
# push strength
mean_strength = .535
std_strength = .096
mean_force = -(mean_strength*weight/push_duration)
std_force = (std_strength*weight/push_duration)
# push timing
mean_timing = 34
std_timing = 21
if TEST:
np.set_printoptions(precision=4, linewidth=200)
# for i in range(len(mean_crouch)):
# mean = [mean_crouch[i], mean_length_ratio, mean_duration_ratio, mean_force, mean_timing, mean_crouch[i]]
# cov = np.diag( [std_crouch**2, std_length_ratio**2, std_duration_ratio**2, std_force**2, std_timing**2, 0])
for i in range(len(mean_crouch)):
mean = [mean_crouch[i], mean_length_ratio, mean_speed_ratio, mean_force, mean_timing, mean_crouch[i]]
cov = np.diag([0 , std_length_ratio**2, std_speed_ratio**2, std_force**2, std_timing**2, 0])
cov[1, 2] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
cov[2, 1] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
if len(test_params) == 0:
test_params = np.random.multivariate_normal(mean, cov, num)
else:
test_params = np.vstack((test_params, np.random.multivariate_normal(mean, cov, num)))
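# Columns of test_params are read back below as [i][0]..[i][5], in the same order as `mean`:
# crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label.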
# sanitize samples: keep crouch angle and walk speed ratio non-negative, and force the push force negative
for i in range(len(test_params)):
test_params[i][0] = abs(test_params[i][0])
test_params[i][2] = abs(test_params[i][2])
test_params[i][3] = -abs(test_params[i][3])
# print(test_params)
print()
print('multivariate normal distribution')
print()
print('mean_crouch', mean_crouch)
print('std_crouch', std_crouch)
print()
print('motion_step_stride', motion_stride_bvh_after_default_param)
print('experi_step_length_mean', experi_stride_mean)
print('experi_step_length_std', experi_stride_std)
print('mean_length_ratio', mean_length_ratio)
print('std_length_ratio', std_length_ratio)
print()
print('motion_speed', speed_bvh_after_default_param)
print('experi_speed_mean', experi_speed_mean)
print('experi_speed_std', experi_speed_std)
print('mean_speed_ratio', mean_speed_ratio)
print('std_speed_ratio', std_speed_ratio)
print()
print('num', num)
print()
print('total # of simulations', len(test_params))
print()
# =======================================================================
# simulation
# =======================================================================
pt = time.time()
print('<start simulation>')
print('hostname %s ' % socket.gethostname())
print()
q = mp.Manager().Queue()
groupsize = 100
paramgroups = [[] for i in range( len(test_params)//groupsize + 1 )]
ith = 1
for i in range(len(test_params)):
crouch_angle = test_params[i][0]
step_length_ratio = test_params[i][1]
walk_speed_ratio = test_params[i][2]
push_force = test_params[i][3]
push_start_timing = test_params[i][4]
crouch_label = test_params[i][5]
paramgroups[i//groupsize].append((push_step, push_duration,
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,
weight, height, ith, q))
ith += 1
csvfile = write_start(csvfilepath)
for i in range(len(paramgroups)):
for j in range(len(paramgroups[i])):
print(j)
worker_simulation(sim, paramgroups[i][j])
write_body(q, csvfile)
write_end(csvfile)
print()
_s = time.time() - pt
_h = _s // 3600
_m = _s // 60
_s -= 60 * _m
_m -= 60 * _h
print('elapsed time = %d h:%d m:%d s' % (int(_h), int(_m), int(_s)))
print()
print('end logging at', gettimestringisoformat())
if __name__ == '__main__':
import sys
import re
option = sys.argv[1]
trial_num = int(sys.argv[2])
_metadata_dir = os.path.dirname(os.path.abspath(__file__)) + '/../data/metadata/'
_nn_finding_dir = os.path.dirname(os.path.abspath(__file__)) + '/../nn/*/'
nn_dir = None
if _nn_finding_dir is not None:
nn_dir = glob.glob(_nn_finding_dir + option)[0]
meta_file = _metadata_dir + option + '.txt'
sim = None
if 'muscle' in option:
sim = PushSim(meta_file, nn_dir+'/max.pt', nn_dir+'/max_muscle.pt')
else:
sim = PushSim(meta_file, nn_dir+'/max.pt')
if "all" in option:
simulate(sim, 0, trial_num, option)
simulate(sim, 1, trial_num, option)
simulate(sim, 2, trial_num, option)
simulate(sim, 3, trial_num, option)
else:
crouch = re.findall(r'crouch\d+', option)[0][6:]
simulate(sim, ['0', '20', '30', '60'].index(crouch), trial_num, option)
|
py | b40bc993b41fce2a8445542a2e1a4c38472c8548 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from frappe.utils import nowdate, getdate, get_url
from one_fm.utils import fetch_employee_signature
from one_fm.processor import sendemail
class RequestforPurchase(Document):
def onload(self):
self.set_onload('accepter', frappe.db.get_value('Purchase Settings', None, 'request_for_purchase_accepter'))
self.set_onload('approver', frappe.db.get_value('Purchase Settings', None, 'request_for_purchase_approver'))
def on_submit(self):
self.notify_request_for_material_accepter()
frappe.msgprint(_("Notification sent to purchaser"))
@frappe.whitelist()
def send_request_for_purchase(self):
self.status = "Approved"
self.save()
self.reload()
#self.notify_request_for_material_accepter()
def notify_request_for_material_accepter(self):
if self.accepter:
page_link = get_url("/desk#Form/Request for Purchase/" + self.name)
message = "<p>Please Review the Request for Purchase <a href='{0}'>{1}</a> Submitted by {2}.</p>".format(page_link, self.name, self.requested_by)
subject = '{0} Request for Purchase by {1}'.format(self.status, self.requested_by)
send_email(self, [self.accepter], message, subject)
create_notification_log(subject, message, [self.accepter], self)
# self.status = "Draft Request"
self.save()
self.reload()
@frappe.whitelist()
def make_purchase_order_for_quotation(self):
if self.items_to_order:
for item in self.items_to_order:
create_purchase_order(supplier=item.supplier, request_for_purchase=self.name, item_code=item.item_code,
qty=item.qty, rate=item.rate, delivery_date=item.delivery_date, uom=item.uom, description=item.description,
warehouse=self.warehouse, quotation=item.quotation)
@frappe.whitelist()
def accept_approve_reject_request_for_purchase(self, status, approver, accepter, reason_for_rejection=None):
page_link = get_url("/desk#Form/Request for Purchase/" + self.name)
# Notify Requester
self.notify_requester_accepter(page_link, status, [self.requested_by], reason_for_rejection)
# Notify Approver
if status == 'Accepted' and frappe.session.user == accepter:
message = "<p>Please Review and Approve or Reject the Request for Purchase <a href='{0}'>{1}</a>, Accepted by {2}</p>".format(page_link, self.name, frappe.session.user)
subject = '{0} Request for Purchase by {1}'.format(status, frappe.session.user)
send_email(self, [approver], message, subject)
create_notification_log(subject, message, [approver], self)
#fetch Signature from employee doc using user ID
if status == "Approved" and frappe.session.user == accepter:
signature = fetch_employee_signature(accepter)
if signature:
self.authorized_signatures = signature
self.save(ignore_permissions=True)
else:
frappe.throw(_("Your Signature is missing!"))
# Notify Accepter
if status in ['Approved', 'Rejected'] and frappe.session.user == approver:
self.notify_requester_accepter(page_link, status, [accepter], reason_for_rejection)
self.status = status
self.reason_for_rejection = reason_for_rejection
self.save()
self.reload()
def notify_requester_accepter(self, page_link, status, recipients, reason_for_rejection=None):
message = "Request for Purchase <a href='{0}'>{1}</a> is {2} by {3}".format(page_link, self.name, status, frappe.session.user)
if status == 'Rejected' and reason_for_rejection:
message += " due to {0}".format(reason_for_rejection)
subject = '{0} Request for Purchase by {1}'.format(status, frappe.session.user)
send_email(self, recipients, message, subject)
create_notification_log(subject, message, recipients, self)
def send_email(doc, recipients, message, subject):
if 'Administrator' in recipients:
recipients.remove('Administrator')
if recipients and len(recipients) > 0:
sendemail(
recipients= recipients,
subject=subject,
message=message,
reference_doctype=doc.doctype,
reference_name=doc.name
)
def create_notification_log(subject, message, for_users, reference_doc):
if 'Administrator' in for_users:
for_users.remove('Administrator')
for user in for_users:
doc = frappe.new_doc('Notification Log')
doc.subject = subject
doc.email_content = message
doc.for_user = user
doc.document_type = reference_doc.doctype
doc.document_name = reference_doc.name
doc.from_user = reference_doc.modified_by
doc.insert(ignore_permissions=True)
@frappe.whitelist()
def make_request_for_quotation(source_name, target_doc=None):
doclist = get_mapped_doc("Request for Purchase", source_name, {
"Request for Purchase": {
"doctype": "Request for Supplier Quotation",
"field_map": [
["name", "request_for_purchase"]
],
"validation": {
"docstatus": ["=", 1]
}
},
"Request for Purchase Item": {
"doctype": "Request for Supplier Quotation Item",
"field_map": [
["uom", "uom"]
]
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_quotation_comparison_sheet(source_name, target_doc=None):
doclist = get_mapped_doc("Request for Purchase", source_name, {
"Request for Purchase": {
"doctype": "Quotation Comparison Sheet",
"field_map": [
["name", "request_for_purchase"]
],
"validation": {
"docstatus": ["=", 1]
}
},
"Request for Purchase Item": {
"doctype": "Quotation Comparison Sheet Item"
}
}, target_doc)
rfq = frappe.db.get_value('Request for Supplier Quotation', {'request_for_purchase': doclist.request_for_purchase}, 'name')
doclist.request_for_quotation = rfq if rfq else ''
return doclist
@frappe.whitelist()
def make_purchase_order(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("get_schedule_dates")
target.run_method("calculate_taxes_and_totals")
def update_item(obj, target, source_parent):
target.stock_qty = obj.qty # flt(obj.qty) * flt(obj.conversion_factor)
doclist = get_mapped_doc("Request for Purchase", source_name, {
"Request for Purchase": {
"doctype": "Purchase Order",
"validation": {
"docstatus": ["=", 1],
}
},
"Request for Purchase Quotation Item": {
"doctype": "Purchase Order Item",
"field_map": [
["quotation_item", "supplier_quotation_item"],
["quotation", "supplier_quotation"],
["request_for_material", "request_for_material"],
["request_for_material_item", "request_for_material_item"],
["sales_order", "sales_order"]
],
"postprocess": update_item
}
}, target_doc, set_missing_values)
return doclist
def create_purchase_order(**args):
args = frappe._dict(args)
po_id = frappe.db.exists('Purchase Order',
{'one_fm_request_for_purchase': args.request_for_purchase, 'docstatus': ['<', 2], 'supplier': args.supplier}
)
if po_id:
po = frappe.get_doc('Purchase Order', po_id)
else:
po = frappe.new_doc("Purchase Order")
po.transaction_date = nowdate()
po.set_warehouse = args.warehouse
po.quotation = args.quotation
# po.schedule_date = add_days(nowdate(), 1)
# po.company = args.company
po.supplier = args.supplier
po.is_subcontracted = args.is_subcontracted or "No"
# po.currency = args.currency or frappe.get_cached_value('Company', po.company, "default_currency")
po.conversion_factor = args.conversion_factor or 1
po.supplier_warehouse = args.supplier_warehouse or None
po.one_fm_request_for_purchase = args.request_for_purchase
po.append("items", {
"item_code": args.item_code,
"item_name": args.item_name,
"description": args.description,
"uom": args.uom,
"qty": args.qty,
"rate": args.rate,
"schedule_date": getdate(args.delivery_date) if (args.delivery_date and getdate(nowdate()) < getdate(args.delivery_date)) else getdate(nowdate()),
"expected_delivery_date": args.delivery_date
})
if not args.do_not_save:
po.save(ignore_permissions=True)
# if not args.do_not_submit:
# po.submit()
return po
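# Minimal illustrative call (placeholder values), mirroring make_purchase_order_for_quotation above;
# these are the keyword names this helper actually reads from args:
# create_purchase_order(supplier="Some Supplier", request_for_purchase="RFP-0001",
# item_code="ITEM-0001", qty=1, rate=10.0, uom="Nos", description="...",
# warehouse="Stores - X", quotation=None, delivery_date=None)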
|
py | b40bc9c01d4434b9a27598e032c23739a2d698ec | import itertools
import re
import diff_match_patch
def side_by_side_diff(old_text, new_text):
"""
Calculates a side-by-side line-based difference view.
Wraps insertions in <ins></ins> and deletions in <del></del>.
"""
def yield_open_entry(open_entry):
""" Yield all open changes. """
ls, rs = open_entry
# Get unchanged parts onto the right line
if ls[0] == rs[0]:
yield (False, ls[0], rs[0])
for l, r in itertools.izip_longest(ls[1:], rs[1:]):
yield (True, l, r)
elif ls[-1] == rs[-1]:
for l, r in itertools.izip_longest(ls[:-1], rs[:-1]):
yield (l != r, l, r)
yield (False, ls[-1], rs[-1])
else:
for l, r in itertools.izip_longest(ls, rs):
yield (True, l, r)
line_split = re.compile(r'(?:\r?\n)')
dmp = diff_match_patch.diff_match_patch()
diff = dmp.diff_main(old_text, new_text)
dmp.diff_cleanupSemantic(diff)
open_entry = ([None], [None])
for change_type, entry in diff:
assert change_type in [-1, 0, 1]
entry = (entry.replace('&', '&amp;')
.replace('<', '&lt;')
.replace('>', '&gt;'))
lines = line_split.split(entry)
# Merge with previous entry if still open
ls, rs = open_entry
line = lines[0]
if line:
if change_type == 0:
ls[-1] = ls[-1] or ''
rs[-1] = rs[-1] or ''
ls[-1] = ls[-1] + line
rs[-1] = rs[-1] + line
elif change_type == 1:
rs[-1] = rs[-1] or ''
rs[-1] += '<ins>%s</ins>' % line if line else ''
elif change_type == -1:
ls[-1] = ls[-1] or ''
ls[-1] += '<del>%s</del>' % line if line else ''
lines = lines[1:]
if lines:
if change_type == 0:
# Push out open entry
for entry in yield_open_entry(open_entry):
yield entry
# Directly push out lines until last
for line in lines[:-1]:
yield (False, line, line)
# Keep last line open
open_entry = ([lines[-1]], [lines[-1]])
elif change_type == 1:
ls, rs = open_entry
for line in lines:
rs.append('<ins>%s</ins>' % line if line else '')
open_entry = (ls, rs)
elif change_type == -1:
ls, rs = open_entry
for line in lines:
ls.append('<del>%s</del>' % line if line else '')
open_entry = (ls, rs)
# Push out open entry
for entry in yield_open_entry(open_entry):
yield entry
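# Illustrative (Python 2, because of itertools.izip_longest) usage:
# for changed, left, right in side_by_side_diff('a\nb\n', 'a\nc\n'):
#     print changed, left, right
# Each yielded triple is (line_changed, old_line_html, new_line_html), with <ins>/<del>
# wrapped around the changed fragments.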
|
py | b40bc9d5b41f73064b24296292eee95c1cb8d798 | import zmq
import RPi.GPIO as GPIO
import threading
import time
cw = 1
ccw = 2
direction = cw
speed = 0
enablePin = 18
in1Pin = 23
in2Pin = 24
GPIO.setmode(GPIO.BCM)
GPIO.setup(in1Pin, GPIO.OUT)
GPIO.setup(in2Pin, GPIO.OUT)
GPIO.setup(enablePin, GPIO.OUT)
def CW_Rotate():
GPIO.output(in1Pin, False)
GPIO.output(in2Pin, True)
def CCW_Rotate():
GPIO.output(in1Pin, True)
GPIO.output(in2Pin, False)
def MotorAction():
pwm = GPIO.PWM(enablePin, 500)
pwm.start(0)
CW_Rotate()
while(1):
if(direction == cw):
CW_Rotate()
else:
CCW_Rotate()
pwm.ChangeDutyCycle(speed)
time.sleep(0.01)
if __name__ == "__main__":
t = threading.Thread(target=MotorAction)
t.start()
ctx = zmq.Context.instance()
subscriber = ctx.socket(zmq.SUB)
service_sensor_sub = "tcp://192.168.43.87:9999"
subscriber.connect(service_sensor_sub)
subscriber.set_string(zmq.SUBSCRIBE, unicode("CW:"))
subscriber.set_string(zmq.SUBSCRIBE, unicode("CC:"))
while(1):
buf = subscriber.recv_string()
if(buf):
if(buf[0:3] == "CW:"):
direction = cw
else:
direction = ccw
speed = int(buf[3:5])
print(buf)
|
py | b40bca2082d3306788dd563b01f63a212c6db473 | from hyara_lib.integration.binaryninja_api import HyaraBinaryNinja
import PySide2.QtWidgets as QtWidgets
from PySide2.QtCore import Qt
from binaryninjaui import DockHandler, DockContextHandler, UIActionHandler
class HyaraDockWidget(QtWidgets.QWidget, DockContextHandler):
def __init__(self, parent, name, data):
QtWidgets.QWidget.__init__(self, parent)
DockContextHandler.__init__(self, self, name)
self.actionHandler = UIActionHandler()
self.actionHandler.setupActionHandler(self)
self.HyaraBinaryNinja = HyaraBinaryNinja()
self.setLayout(self.HyaraBinaryNinja.layout)
def shouldBeVisible(self, view_frame):
if view_frame is None:
return False
else:
return True
def notifyViewChanged(self, view_frame):
pass
@staticmethod
def create_widget(name, parent, data=None):
return HyaraDockWidget(parent, name, data)
dock_handler = DockHandler.getActiveDockHandler()
dock_handler.addDockWidget(
"Hyara",
HyaraDockWidget.create_widget,
Qt.BottomDockWidgetArea,
Qt.Horizontal,
False,
)
|
py | b40bcab99fd755a23a1b05fdd218afc0d0890695 | #!/usr/bin/env python3
import subprocess
import os
import sys
import json
sys.path.append("../")
sys.path.append("../../system/lib/")
sys.path.append("../array/")
import json_parser
import pos
import cli
import api
import UPDATE_VOL_QOS_BASIC_2
import volume
NAME = UPDATE_VOL_QOS_BASIC_2.NAME
SIZE = UPDATE_VOL_QOS_BASIC_2.SIZE
IOPS = (2**64-1) // 1000 # Refer to SRS: http://globalwiki.itplatform.sec.samsung.net:8099/display/ibof/2.3.1+%5BIBOFOS_SW_FRID_0301%5D+Create+Volume
BW = 0
ARRAYNAME = UPDATE_VOL_QOS_BASIC_2.ARRAYNAME
def clear_result():
if os.path.exists( __file__ + ".result"):
os.remove( __file__ + ".result")
def check_result(detail):
expected_list = []
expected_list.append(volume.Volume(NAME, SIZE, IOPS, BW))
data = json.loads(detail)
actual_list = []
for item in data['Response']['result']['data']['volumes']:
vol = volume.Volume(item['name'], item['total'], item['maxiops'], item['maxbw'])
actual_list.append(vol)
if len(actual_list) != len(expected_list):
return "fail"
for actual in actual_list:
checked = False
for expected in expected_list:
if actual.name == expected.name and actual.total == expected.total and actual.maxiops == expected.maxiops and actual.maxbw == expected.maxbw:
checked = True
break
if checked == False:
return "fail"
return "pass"
def set_result():
out = cli.list_volume(ARRAYNAME)
result = check_result(out)
code = json_parser.get_response_code(out)
with open(__file__ + ".result", "w") as result_file:
result_file.write(result + " (" + str(code) + ")" + "\n" + out)
def execute():
clear_result()
UPDATE_VOL_QOS_BASIC_2.execute()
out = cli.update_volume_qos(NAME, str(IOPS), str(BW), ARRAYNAME)
return out
if __name__ == "__main__":
if len(sys.argv) >= 2:
pos.set_addr(sys.argv[1])
execute()
set_result()
pos.flush_and_kill_pos()
|
py | b40bcad17745505ec678230ed9798963e9fa2b92 | # -*- coding: utf-8 -*-
#
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "LICENSE" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# AWS SDK for PHP Developer Guide documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 14 17:27:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.extlinks']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AWS PHP Developer Guide'
copyright = u'2019, Amazon Web Services'
author = u'Amazon Web Services'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'3.0'
# The full version, including alpha/beta/rc tags.
release = u'3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'venv']
sys.path.append(os.path.abspath('_ext/'))
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AWSPHPDeveloperGuide'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AWSPHPDeveloperGuide.tex', project,
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'awsphpdeveloperguide', project,
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AWSPHPDeveloperGuide', project,
author, 'AWSPHPDeveloperGuide', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# default code language for syntax highlighting
highlight_language = 'php'
if 'extlinks' not in vars():
extlinks = {}
# These URLs make maintaining the extlinks easier.
aws_php_api_url = 'https://docs.aws.amazon.com/aws-sdk-php/v3/api'
# extlinks['role'] = (url_string, term_prepended_by)
# a generic link to the AWS SDK reference docs. Doesn't work well in a frame.
extlinks['aws-php-ref'] = (aws_php_api_url + '/%s', '')
# a link to a class within the AWS SDK -- can use frames.
extlinks['aws-php-class'] = (aws_php_api_url + '/%s', '')
# links to examples for a particular service (ex: :sdk-examples-php-s3:`ListTables.php`)
samples_url = 'https://github.com/awsdocs/aws-doc-sdk-examples/'
for svc in [
'cloudwatch',
'cloudwatchevents',
'dynamodb',
'ec2',
'iam',
's3',
'sqs',
]:
extlinks['sdk-examples-php-%s' % svc] = (samples_url +
'blob/master/php/example_code/{svc}/'.format(svc=svc) +
'%s', '')
for svc in [
'pinpoint'
]:
extlinks['sdk-examples-php-%s' % svc] = (samples_url +
        'blob/master/php/example_code/{svc}/'.format(svc=svc) +
'%s', '')
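# Illustrative sketch (added, not part of the generated config): how an extlinks
# tuple is expected to expand a role target; the page name below is a hypothetical
# example.
# url_template, prefix = extlinks['aws-php-class']
# url_template % 'class-Aws.S3.S3Client.html'
# -> 'https://docs.aws.amazon.com/aws-sdk-php/v3/api/class-Aws.S3.S3Client.html'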
|
py | b40bcca8471cda42d1e74d825db9f8539fc957d5 | from django.apps import AppConfig
class CaptchaConfig(AppConfig):
name = 'captcha'
|
py | b40bccf39660fac0662ab67b2821d97880ddbf15 | # -*- coding: utf-8 -*-
from PySide2.QtCore import Signal
from PySide2.QtWidgets import QWidget
from .......GUI.Dialog.DMachineSetup.SLamParam.DAVDuct.PVentCirc.PVentCirc import (
PVentCirc,
)
from .......GUI.Dialog.DMachineSetup.SLamParam.DAVDuct.PVentPolar.PVentPolar import (
PVentPolar,
)
from .......GUI.Dialog.DMachineSetup.SLamParam.DAVDuct.PVentTrap.PVentTrap import (
PVentTrap,
)
from .......GUI.Dialog.DMachineSetup.SLamParam.DAVDuct.PVentUD.PVentUD import (
PVentUD,
)
from .......GUI.Dialog.DMachineSetup.SLamParam.DAVDuct.WVent.Ui_WVent import Ui_WVent
# List to convert index of combobox to slot type
PAGE_INDEX = [PVentCirc, PVentTrap, PVentPolar, PVentUD]
INIT_INDEX = [wid.hole_type for wid in PAGE_INDEX]
HOLE_NAME = [wid.hole_name for wid in PAGE_INDEX]
class WVent(Ui_WVent, QWidget):
"""Widget to setup a Ventilation in the list"""
# Signal to DMachineSetup to know that the save popup is needed
saveNeeded = Signal()
def __init__(self, lam, index):
"""Initialize the widget according the current lamination
Parameters
----------
self : WVent
A WVent widget
lam : Lamination
current lamination to edit
index : int
Index of the ventilation in the list to update
"""
# Build the interface according to the .ui file
QWidget.__init__(self)
self.setupUi(self)
self.obj = lam
self.index = index
# Fill the combobox with the available slot
self.c_vent_type.clear()
for hole in HOLE_NAME:
self.c_vent_type.addItem(hole)
        # Avoid erasing all the parameters when navigating through the vents
self.previous_vent = dict()
for vent_type in INIT_INDEX:
self.previous_vent[vent_type] = None
self.c_vent_type.setCurrentIndex(INIT_INDEX.index(type(lam.axial_vent[index])))
# Regenerate the pages with the new values
self.w_vent.setParent(None)
self.w_vent = PAGE_INDEX[self.c_vent_type.currentIndex()](
lam=lam, vent=lam.axial_vent[index]
)
# Refresh the GUI
self.main_layout.removeWidget(self.w_vent)
self.main_layout.insertWidget(1, self.w_vent)
        # Connect the slot/signal
self.c_vent_type.currentIndexChanged.connect(self.set_vent_type)
def set_vent_type(self, c_index):
"""Initialize self.obj with the vent corresponding to index
Parameters
----------
self : WVent
A WVent object
c_index : index
Index of the selected vent type in the combobox
"""
# Save the vent
vent = self.obj.axial_vent[self.index]
self.previous_vent[type(vent)] = vent
# Call the corresponding constructor
if self.previous_vent[INIT_INDEX[c_index]] is None:
# No previous vent of this type
self.obj.axial_vent[self.index] = INIT_INDEX[c_index]()
self.obj.axial_vent[self.index]._set_None() # No default value
else: # Load the previous vent of this type
self.obj.axial_vent[self.index] = self.previous_vent[INIT_INDEX[c_index]]
# Update the GUI
self.w_vent.setParent(None)
self.w_vent = PAGE_INDEX[c_index](
lam=self.obj, vent=self.obj.axial_vent[self.index]
)
# Refresh the GUI
self.main_layout.removeWidget(self.w_vent)
self.main_layout.insertWidget(1, self.w_vent)
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def check(self):
"""Check that the current machine have all the needed field set
Parameters
----------
self : WVent
A WVent object
Returns
-------
error : str
Error message (return None if no error)
"""
return self.w_vent.check()
|
py | b40bcd7e131c2192d230388c87d2555c5ee5fbe0 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = '_'.join(['test', op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError('Test %s defined more than once' % test_name)
setattr(test, test_name, fn)
class ShapeTest(test_lib.TestCase):
def testBatchGradientUnknownSize(self):
with self.test_session():
batch_size = constant_op.constant(3)
matrix_size = constant_op.constant(4)
batch_identity = array_ops.tile(
array_ops.expand_dims(
array_ops.diag(array_ops.ones([matrix_size])), 0),
[batch_size, 1, 1])
determinants = linalg_ops.matrix_determinant(batch_identity)
reduced = math_ops.reduce_sum(determinants)
sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
self.assertAllClose(batch_identity.eval(), sum_grad.eval())
class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
def Test(self):
with self.test_session():
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
a = constant_op.constant(a_np)
b = functor_(a, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else 0.05
theoretical, numerical = gradient_checker.compute_gradient(
a,
a.get_shape().as_list(),
b,
b.get_shape().as_list(),
x_init_value=a_np,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class MatrixBinaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBinaryFunctorGradientTest(functor_,
dtype_,
shape_,
float32_tol_fudge=1.0,
**kwargs_):
def Test(self):
with self.test_session():
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
a = constant_op.constant(a_np)
b_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
b = constant_op.constant(b_np)
c = functor_(a, b, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.04
# The gradients for a and b may be of very different magnitudes,
# so to not get spurious failures we test them separately.
for factor, factor_init in [a, a_np], [b, b_np]:
theoretical, numerical = gradient_checker.compute_gradient(
factor,
factor.get_shape().as_list(),
c,
c.get_shape().as_list(),
x_init_value=factor_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == '__main__':
# Tests for gradients of binary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
for adjoint in False, True:
shape = extra + (size, size)
name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(str, shape)),
str(adjoint))
_AddTest(
MatrixBinaryFunctorGradientTest,
'MatrixSolveGradient',
name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))
for lower in True, False:
name = '%s_low_%s' % (name, lower)
_AddTest(
MatrixBinaryFunctorGradientTest,
'MatrixTriangularSolveGradient',
name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_triangular_solve,
dtype,
shape,
float32_tol_fudge=4.0,
adjoint=adjoint,
lower=lower))
# Tests for gradients of unary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
shape = extra + (size, size)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
_AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
dtype, shape))
_AddTest(
MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,
dtype, shape))
# Tests for gradients of matrix_solve_ls
for dtype in np.float32, np.float64:
for rows in 2, 5, 10:
for cols in 2, 5, 10:
for l2_regularization in 1e-6, 0.001, 1.0:
shape = (rows, cols)
name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),
l2_regularization)
_AddTest(
MatrixBinaryFunctorGradientTest,
'MatrixSolveLsGradient',
name,
_GetMatrixBinaryFunctorGradientTest(
lambda a, b, l=l2_regularization: linalg_ops.matrix_solve_ls(a, b, l),
dtype,
shape,
float32_tol_fudge=4.0))
test_lib.main()
|
py | b40bce8b5f65c705b77c0dcde643546d14cf7daa | # -*- coding: utf-8 -*-
# @Author : LG
"""
执行用时:1828 ms, 在所有 Python3 提交中击败了9.51% 的用户
内存消耗:13.7 MB, 在所有 Python3 提交中击败了57.59% 的用户
解题思路:
递归。 思路参考了https://leetcode-cn.com/problems/predict-the-winner/solution/xie-gei-suan-fa-ru-men-zhe-wo-zi-ji-de-pythonti-ji/
主要记录下自己学习到的东西
对于列表的递归,使用指针是更方便的
具体实现见代码注释
"""
class Solution:
def PredictTheWinner(self, nums: List[int]) -> bool:
        def choose(l, r, reverse):  # l and r are the left/right pointers into nums; reverse marks whose turn it is: 1 for player one, -1 for player two
            if l == r:  # only a single element left, just take it
                return reverse * nums[l]
            get_left = reverse * nums[l] + choose(l+1, r, -reverse)  # take the left number: move the left pointer, switch players
            get_right = reverse * nums[r] + choose(l, r-1, -reverse)  # take the right number: move the right pointer, switch players
            if reverse == 1:  # on player one's turn return the maximum score, on player two's the minimum, so player one's final score is as large as possible
                return max(get_left, get_right)
            else:
                return min(get_left, get_right)
        n = len(nums)
        if n % 2 == 0:  # with an even number of values, player one can always secure the better half, so they can win
            return True
        else:
            if choose(0, n-1, 1) >= 0:  # the result is player one's best achievable score difference; if it is non-negative, player one can win
return True
else:
return False
"""
执行用时:28 ms, 在所有 Python3 提交中击败了100.00% 的用户
内存消耗:13.6 MB, 在所有 Python3 提交中击败了81.58% 的用户
解题思路:
动态规划
例:
i j
[1, 2, 3, 4, 5]
1 2 3 4 5
1 1 1 2 2 3
2 0 2 1 3 2
3 0 0 3 1 4
4 0 0 0 4 1
5 0 0 0 0 5
其中 i,j为指针,指向 字符串
dp[i][j] 表示只有 nums[i]~nums[j]时的分数差
则 dp[i][i] = nums[i] 只有一个数字时,玩家一先拿,则 分数差为nums[i]
则 dp[i][j] = max(nums[j]-dp[i][j-1], num[i]-dp[i+1][j])
i~j 的差值,等于 nums[i] 与 i+1~j的分数差, 与 nums[j] 与 i~j-1 的分数差的 最大值。
因为玩家一会先选,则必然从头尾拿,
nums[i]- dp[i+1][j] 表示从左侧拿
nums[j]- dp[i][j-1] 表示从右侧拿
"""
class Solution:
def PredictTheWinner(self, nums: List[int]) -> bool:
n = len(nums)
if n % 2 == 0:
return True
dp = [[0 for _ in range(n)] for _ in range(n)]
for i in range(n):
dp[i][i] = nums[i]
for j in range(1, n):
for i in range(j - 1, -1, -1):
dp[i][j] = max(nums[i] - dp[i + 1][j], nums[j] - dp[i][j - 1])
if dp[0][-1] >= 0:
return True
else:
return False
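# Hedged usage sketch (added, not part of the original submission): a quick manual
# check of the DP solution; the inputs are made-up examples.
# Solution().PredictTheWinner([1, 5, 2])       # expected False: player one cannot win
# Solution().PredictTheWinner([1, 5, 233, 7])  # expected True: even count, player one wins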
|
py | b40bceb6a5505dac5249509a8f4afe0d033bcd29 | from gym_space_racer.envs.rocket_racer_env import RocketRacerEnv
|
py | b40bcf5009fc56b3572dfc439a0d862e17842169 | from flask import Flask,request,jsonify
import distilbert_model as model
app = Flask(__name__)
@app.route('/')
def hello():
return 'Congrats ! server is working'
@app.route('/get_sentiment',methods=['POST'])
def get_sentiment():
tx = request.get_json(force=True)
text = tx['Review']
sent = model.get_prediction(text)
return jsonify(result = sent)
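# Hedged client sketch (added): one way a caller might exercise the endpoint once the
# server below is running; the host/port match app.run and the review text is made up.
# import requests
# resp = requests.post('http://localhost:5000/get_sentiment',
#                      json={'Review': 'Great product, works as expected.'})
# print(resp.json()['result'])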
if __name__=='__main__':
app.run(host='0.0.0.0',port=5000 ,debug=True,use_reloader=False) |
py | b40bcfc0420338abd096159c494d4b672bd17aff | import pytest
from ckanext.dalrrd_emc_dcpr import helpers
pytestmark = pytest.mark.unit
@pytest.mark.parametrize(
"value, expected",
[
(
{
"type": "Polygon",
"coordinates": [
[[0.0, 0.0], [10.0, 0.0], [10.0, 10.0], [0.0, 10.0], [0.0, 0.0]]
],
},
[10.0, 0.0, 0.0, 10.0],
),
],
)
def test_convert_geojson_to_bbox(value, expected):
assert helpers.convert_geojson_to_bbox(value) == expected
|
py | b40bcfc26d0044fae976c6bf0dd7f165f25805d5 | import os, sys, json, operator
DECL_PUBLIC = 0x000001
DECL_PRIVATE = 0x000002
DECL_PROTECTED = 0x000004
DECL_INTERNAL = 0x000008
DECL_VIRTUAL = 0x000100
DECL_OVERRIDE = 0x000200
DECL_ABSTRACT = 0x000400
DECL_FINAL = 0x000800
DECL_EXTERN = 0x001000
DECL_EXTENSION = 0x002000
DECL_DEFAULT = 0x004000
DECL_GETTER = 0x010000
DECL_SETTER = 0x020000
DECL_OPERATOR = 0x040000
DECL_IFACEMEMBER = 0x080000
DECL_ACCESSMASK = DECL_PUBLIC|DECL_PRIVATE|DECL_PROTECTED|DECL_INTERNAL
#------------------------------------------------------------------------------
def get_flags(flags):
f = ''
if flags & DECL_PUBLIC: f+='PUBLIC' if f == '' else '|PUBLIC'
if flags & DECL_PRIVATE: f+='PRIVATE' if f == '' else '|PRIVATE'
if flags & DECL_PROTECTED: f+='PROTECTED' if f == '' else '|PROTECTED'
if flags & DECL_VIRTUAL: f+='VIRTUAL' if f == '' else '|VIRTUAL'
if flags & DECL_OVERRIDE: f+='OVERRIDE' if f == '' else '|OVERRIDE'
if flags & DECL_ABSTRACT: f+='ABSTRACT' if f == '' else '|ABSTRACT'
if flags & DECL_FINAL: f+='FINAL' if f == '' else '|FINAL'
if flags & DECL_EXTERN: f+='EXTERN' if f == '' else '|EXTERN'
if flags & DECL_EXTENSION: f+='EXTENSION' if f == '' else '|EXTENSION'
if flags & DECL_DEFAULT: f+='DEFAULT' if f == '' else '|DEFAULT'
if flags & DECL_GETTER: f+='GETTER' if f == '' else '|GETTER'
if flags & DECL_SETTER: f+='SETTER' if f == '' else '|SETTER'
if flags & DECL_OPERATOR: f+='OPERATOR' if f == '' else '|OPERATOR'
    if flags & DECL_IFACEMEMBER: f+='IFACEMEMBER' if f == '' else '|IFACEMEMBER'
return f if f != '' else 'NONE'
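# Illustrative examples (added): the helper decodes the bitmask into a pipe-joined
# string, e.g. get_flags(DECL_PUBLIC | DECL_FINAL) == 'PUBLIC|FINAL' and
# get_flags(0) == 'NONE'.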
def is_public(flags):
return flags & DECL_PUBLIC
def is_private(flags):
return flags & DECL_PRIVATE
def is_protected(flags):
return flags & DECL_PROTECTED
def is_virtual(flags):
return flags & DECL_VIRTUAL
def is_override(flags):
return flags & DECL_OVERRIDE
def is_abstract(flags):
return flags & DECL_ABSTRACT
def is_final(flags):
return flags & DECL_FINAL
def is_extern(flags):
return flags & DECL_EXTERN
def is_extension(flags):
return flags & DECL_EXTENSION
def is_default(flags):
return flags & DECL_DEFAULT
def is_getter(flags):
return flags & DECL_GETTER
def is_setter(flags):
return flags & DECL_SETTER
def is_operator(flags):
return flags & DECL_OPERATOR
def is_ifacemember(flags):
return flags & DECL_IFACEMEMBER
def get_short_type(semtype):
def get_last_type(stype):
return stype.split('.')[-1]
if '<' in semtype and '>' in semtype:
typ, _, gen = semtype.partition('<')
typ = get_last_type(typ)
gen = gen[:-1].split(',')
gen = [get_last_type(x) for x in gen]
return typ+'<'+', '.join(gen)+'>'
else:
return semtype.split('.')[-1]
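# Illustrative examples (added): the helper strips namespace qualifiers, e.g.
# get_short_type('monkey.types.Int') == 'Int' and, for a hypothetical generic,
# get_short_type('std.collections.Stack<monkey.types.Int>') == 'Stack<Int>'.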
def debug_decl(file, decl):
print('in file: '+file)
print(json.dumps(decl, indent=2))
#------------------------------------------------------------------------------
def parse_func(file, decl):
out = {}
kind_func = decl['kind']
if decl['ident'] in '<=>,*,/,+,-,&,~,<>,=,|,to'.split(','):
kind_func = 'operator'
out['kind'] = kind_func
out['name'] = decl['ident']
out['flags'] = decl['flags']
out['type'] = decl['type']['kind']
# generic args <...>
if 'genArgs' in decl:
out['genArgs'] = decl['genArgs']
# return type
if 'retType' in decl['type']:
rtype = decl['type']['retType']
kind = rtype['kind']
if kind == 'ident':
rttype = rtype['ident']
elif kind == 'arraytype':
rttype = rtype['type']['ident']+'[]'
elif kind == 'generic':
rttype = rtype['expr']['ident']+'<'+(','.join([arg['ident'].split('.')[-1] for arg in rtype['args']]))+'>'
else:
debug_decl(file, decl)
out['retType'] = {
'kind': kind,
'semtype': rttype
}
# parameters
if 'params' in decl['type']:
out['params'] = []
for param in decl['type']['params']:
out_param = {}
out_param['name'] = param['ident']
if 'semtype' in param:
out_param['semtype'] = get_short_type(param['semtype'])
else:
if 'kind' in param['type']:
out_param['semtype'] = param['type']['kind']
out['params'].append(out_param)
return out
#------------------------------------------------------------------------------
def parse_var(file, decl):
out = {}
out['kind'] = decl['kind']
out['name'] = decl['ident']
out['flags'] = decl['flags']
semtype = ''
if 'semtype' in decl:
semtype = get_short_type(decl['semtype'])
else:
if 'type' in decl:
kind = decl['type']['kind']
if kind == 'ident':
semtype = get_short_type(decl['type']['ident'])
elif kind == 'arraytype':
semtype = decl['type']['ident']+'[]'
elif kind == 'generic':
semtype = decl['type']['expr']['ident']+'<'+(','.join([arg['ident'].split('.')[-1] for arg in decl['type']['args']]))+'>'
else:
debug_decl(file, decl)
out['semtype'] = semtype
return out
#------------------------------------------------------------------------------
def parse_class(file, decl):
out = {}
out['kind'] = decl['kind']
out['name'] = decl['ident']
out['flags'] = decl['flags']
out['members'] = []
if 'namespace' in decl:
out['namespace'] = decl['namespace']
if 'superType' in decl: # extends ...
out['superType'] = decl['superType']['ident']
if 'ifaceTypes' in decl: # implements [...]
out['ifaceTypes'] = ','.join([ident['ident'] for ident in decl['ifaceTypes']])
if 'members' in decl:
for member in decl['members']:
kind = member['kind']
out_member = {}
if kind in ['field', 'const', 'global']:
out_member = parse_var(file, member)
if kind in ['method', 'function', 'operator', 'getter', 'setter']:
out_member = parse_func(file, member)
if len(out_member):
out['members'].append(out_member)
return out
#------------------------------------------------------------------------------
def parse_decl(file, decl):
kind = decl['kind']
if kind in ['class', 'struct', 'interface']:
return parse_class(file, decl)
else:
return None
#------------------------------------------------------------------------------
def parse_file(file, decl):
out = {}
out['kind'] = 'file'
out['file'] = file
out['name'] = decl['ident']
out['flags'] = decl['flags']
out['decls'] = []
if 'namespace' in decl:
out['namespace'] = decl['namespace']
if 'members' in decl:
for member in decl['members']:
out_member = parse_decl(file, member)
if out_member is not None:
out['decls'].append(out_member)
if 'imports' in decl:
for imp in decl['imports']:
if not(imp.startswith('<') and imp.endswith('>')):
imp_file = os.path.splitext(imp)[0]+'.geninfo'
imp_ast = open(imp_file, 'r')
imp_out = json.load(imp_ast)
imp_ast.close()
out_import = parse_file(imp_file, imp_out)
out['decls'].append(out_import)
if 'usings' in decl:
out['usings'] = ['monkey.types']+decl['usings']
return out
#------------------------------------------------------------------------------
def gen_ir(src):
ast = open(src, 'r')
inp = json.load(ast)
ast.close()
out = parse_file(src, inp)
return out
#------------------------------------------------------------------------------
out_lines = ''
def l(s='', indent=0):
global out_lines
out_lines += (' ' * indent) + s
def ln(s='', indent=0):
global out_lines
out_lines += (' ' * indent) + s + '\n'
def reset_globals():
global out_lines
out_lines = ''
def get_visibility(flags):
if is_public(flags): return '+'
if is_private(flags): return '-'
if is_protected(flags): return '#'
def get_members_kind(members, kind):
return sorted(filter(lambda x: x['kind'] == kind, members), key=lambda x: x['name'])
#------------------------------------------------------------------------------ list
def emit_txt_var(decl):
ln(get_visibility(decl['flags'])+decl['kind']+' '+decl['name']+': '+decl['semtype'])
def emit_txt_func(decl):
l(get_visibility(decl['flags'])+decl['kind']+' '+decl['name'])
# generic args
if 'genArgs' in decl:
l('<'+', '.join(decl['genArgs'])+'>')
l(': ')
# return type
if 'retType' in decl:
l(decl['retType']['semtype'])
# parameters
if 'params' in decl:
params = ''
for param in decl['params']:
if params != '': params+=', '
params+=param['name']+': '+param['semtype']
l('('+params+')')
ln()
def emit_txt_class(decl):
l(decl['kind']+' '+decl['name'])
if 'superType' in decl:
l(' --> '+decl['superType'])
if 'ifaceTypes' in decl:
l(' implements '+decl['ifaceTypes'])
ln()
# attributes
for at in get_members_kind(decl['members'], 'const'):
emit_txt_var(at)
for at in get_members_kind(decl['members'], 'global'):
emit_txt_var(at)
for at in get_members_kind(decl['members'], 'field'):
emit_txt_var(at)
ln('--')
# operations
for op in get_members_kind(decl['members'], 'function'):
emit_txt_func(op)
for op in get_members_kind(decl['members'], 'operator'):
emit_txt_func(op)
for op in get_members_kind(decl['members'], 'method'):
emit_txt_func(op)
ln()
def gen_txt(ir):
if 'decls' in ir:
for decl in ir['decls']:
kind = decl['kind']
if kind in ['class', 'struct', 'interface']:
emit_txt_class(decl)
if 'decls' in decl:
gen_txt(decl)
#------------------------------------------------------------------------------ mermaid.js
def emit_mjs_var(decl, indent=0):
l(get_visibility(decl['flags'])+decl['name'], indent)
# static
if decl['kind'] in ['const', 'global']:
l('$')
l(': '+decl['semtype'])
ln()
def emit_mjs_func(decl, indent=0):
l(get_visibility(decl['flags'])+decl['name'], indent)
# generic args
if 'genArgs' in decl:
l('<'+', '.join(decl['genArgs'])+'>')
# parameters
if 'params' in decl:
params = ''
for param in decl['params']:
if params != '': params+=', '
params+=param['semtype']
l('('+params+')')
# abstract/static
if is_abstract(decl['flags']) or is_virtual(decl['flags']):
l('*')
elif decl['kind'] in ['function']:
l('$')
# return type
if 'retType' in decl:
l(' '+decl['retType']['semtype'])
ln()
def emit_mjs_class(decl):
ln(decl['kind']+' '+decl['name']+ ' {', 2)
# attributes
for at in get_members_kind(decl['members'], 'const'):
emit_mjs_var(at, 4)
for at in get_members_kind(decl['members'], 'global'):
emit_mjs_var(at, 4)
for at in get_members_kind(decl['members'], 'field'):
emit_mjs_var(at, 4)
# operations
for op in get_members_kind(decl['members'], 'function'):
emit_mjs_func(op, 4)
for op in get_members_kind(decl['members'], 'operator'):
emit_mjs_func(op, 4)
for op in get_members_kind(decl['members'], 'method'):
emit_mjs_func(op, 4)
ln('}', 2)
if 'superType' in decl:
ln(decl['superType']+' <|-- '+decl['name'],2)
if 'ifaceTypes' in decl:
for iface in decl['ifaceTypes'].split(','):
ln(iface+' <.. '+decl['name'], 2)
ln()
def gen_mjs(ir):
global out_lines
if out_lines == '':
ln('classDiagram')
if 'decls' in ir:
for decl in ir['decls']:
kind = decl['kind']
if kind in ['class', 'struct', 'interface']:
# class name
emit_mjs_class(decl)
if 'decls' in decl:
gen_mjs(decl)
#------------------------------------------------------------------------------
if __name__ == '__main__':
#os.chdir(os.path.split(__file__)[0])
out = gen_ir('wake.geninfo')
# save ir in file
sav = open('gen_ir.json', 'w')
sav.write(json.dumps(out, indent=2))
    sav.close()
# generate simple text
gen_txt(out)
sav = open('gen_txt.txt', 'w')
sav.write(out_lines)
    sav.close()
reset_globals()
# generate mermaid
gen_mjs(out)
sav = open('gen_mjs.txt', 'w')
sav.write(out_lines)
    sav.close()
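    # Illustrative note (added): for a hypothetical class Foo extending Bar, gen_mjs is
    # expected to produce mermaid.js classDiagram text along these lines (the actual
    # output depends on the contents of wake.geninfo):
    #   classDiagram
    #     class Foo {
    #       +count: Int
    #       +Update() Void
    #     }
    #     Bar <|-- Foo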
|
py | b40bd299a26c37f69ba583ce535e99e2ba71fd32 | import datetime
import operator
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, assume
from hypothesis import strategies as st
from hypothesis.extra import numpy as npst
from happenings.event import MONTHS
JAN_1_2020 = datetime.datetime.strptime("2020-01-01", "%Y-%m-%d")
JAN_1_2021 = datetime.datetime.strptime("2021-01-01", "%Y-%m-%d") - datetime.timedelta(days=1)
DATES_2020 = pd.date_range(start=JAN_1_2020, end=JAN_1_2021, freq="D")
JAN_EVENT = MONTHS['January']
JAN_EVENT_2020_VALUES = np.zeros_like(DATES_2020).astype(bool)
JAN_EVENT_2020_VALUES[0:31] = True
BINARY_COMPARISON_OPS = (operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.ge,
operator.gt)
BINARY_BITWISE_OPS = (operator.and_,
operator.xor,
operator.or_)
BINARY_MATH_OPS_ZERO_ISSUES = (operator.mod,
operator.floordiv,
operator.truediv)
BINARY_NUM_OPS = (operator.add,
operator.floordiv,
operator.lshift,
operator.mod,
operator.mul,
operator.matmul,
operator.pow,
operator.rshift,
operator.sub,
operator.truediv)
def test_functional_event():
np.testing.assert_array_equal(JAN_EVENT.sample_from(DATES_2020),
JAN_EVENT_2020_VALUES)
@pytest.mark.parametrize('op', BINARY_COMPARISON_OPS)
@given(a=st.floats(allow_nan=False))
def test_cmp_scalar(a, op):
np.testing.assert_array_equal(op(a, JAN_EVENT).sample_from(DATES_2020),
op(a, JAN_EVENT_2020_VALUES))
np.testing.assert_array_equal(op(JAN_EVENT, a).sample_from(DATES_2020),
op(JAN_EVENT_2020_VALUES, a))
@pytest.mark.parametrize('op', BINARY_COMPARISON_OPS)
@given(a=npst.arrays(shape=len(JAN_EVENT_2020_VALUES), dtype=st.sampled_from([np.int, np.float])))
def test_cmp_array(a, op):
# Not supported as the cmp operators are handled by numpy comparing each item individually.
# np.testing.assert_array_equal(op(a, JAN_EVENT).sample_from(DATES_2020),
# op(a, JAN_EVENT_2020_VALUES))
np.testing.assert_array_equal(op(JAN_EVENT, a).sample_from(DATES_2020),
op(JAN_EVENT_2020_VALUES, a))
@pytest.mark.parametrize('op', BINARY_COMPARISON_OPS)
def test_cmp_event(op):
np.testing.assert_array_equal(op(JAN_EVENT, JAN_EVENT).sample_from(DATES_2020),
op(JAN_EVENT_2020_VALUES, JAN_EVENT_2020_VALUES))
@pytest.mark.parametrize('op', BINARY_BITWISE_OPS)
@given(a=st.booleans())
def test_bitwise_scalar(a, op):
np.testing.assert_array_equal(op(a, JAN_EVENT).sample_from(DATES_2020),
op(a, JAN_EVENT_2020_VALUES))
np.testing.assert_array_equal(op(JAN_EVENT, a).sample_from(DATES_2020),
op(JAN_EVENT_2020_VALUES, a))
@pytest.mark.parametrize('op', BINARY_BITWISE_OPS)
@given(a=npst.arrays(shape=len(JAN_EVENT_2020_VALUES), dtype=np.bool))
def test_bitwise_array(a, op):
# Not supported as the bitwise operators are handled by numpy comparing each item individually.
# np.testing.assert_array_equal(op(a, JAN_EVENT).sample_from(DATES_2020),
# op(a, JAN_EVENT_2020_VALUES))
np.testing.assert_array_equal(op(JAN_EVENT, a).sample_from(DATES_2020),
op(JAN_EVENT_2020_VALUES, a))
@pytest.mark.parametrize('op', BINARY_BITWISE_OPS)
@given(a=npst.arrays(shape=len(JAN_EVENT_2020_VALUES), dtype=np.bool))
def test_bitwise_event(a, op):
np.testing.assert_array_equal(op(JAN_EVENT, JAN_EVENT).sample_from(DATES_2020),
op(JAN_EVENT_2020_VALUES, JAN_EVENT_2020_VALUES))
@pytest.mark.parametrize('op', BINARY_MATH_OPS_ZERO_ISSUES)
@given(a=st.floats(allow_nan=False, allow_infinity=False))
def test_div_scalar(a, op):
np.testing.assert_array_equal(op(a, 1+JAN_EVENT).sample_from(DATES_2020),
op(a, 1+JAN_EVENT_2020_VALUES))
assume(a != 0)
np.testing.assert_array_equal(op(JAN_EVENT, a).sample_from(DATES_2020),
op(JAN_EVENT_2020_VALUES, a))
@pytest.mark.skip("")
@pytest.mark.parametrize('op', BINARY_MATH_OPS_ZERO_ISSUES)
@given(a=npst.arrays(shape=len(JAN_EVENT_2020_VALUES),
dtype=np.float,
elements=st.floats(allow_infinity=False, allow_nan=False)))
def test_div_array(a, op):
# Not supported as the cmp operators are handled by numpy comparing each item individually.
# np.testing.assert_array_equal(op(a, JAN_EVENT).sample_from(DATES_2020),
# op(a, JAN_EVENT_2020_VALUES))
a[a == 0] = 1
np.testing.assert_array_equal(op(JAN_EVENT, a).sample_from(DATES_2020),
op(JAN_EVENT_2020_VALUES, a))
@pytest.mark.parametrize('op', BINARY_MATH_OPS_ZERO_ISSUES)
def test_div_event(op):
np.testing.assert_array_equal(op(JAN_EVENT, JAN_EVENT+1).sample_from(DATES_2020),
op(JAN_EVENT_2020_VALUES, JAN_EVENT_2020_VALUES+1))
|
py | b40bd302f6abc4df640f87196581c0d67d902623 | name = "WindbgTool"
|
py | b40bd3c3a4a9bd4942f50952b2c11495dc6ac237 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import defaultdict
from nodal.core.exceptions import (
CyclicDependencyException,
MaxInputsExceededException,
NodeTypeMismatchException
)
def verify_dependencies(node, input_node):
"""
Checks for cyclic dependencies.
Args:
node (BaseNode): Current node
input_node (BaseNode): Node to connect current node's input to
Raises:
CyclicDependencyException: When node's input chain ends up referring
back to this node.
"""
if input_node is node:
msg = f'Unable to set {input_node} as input for {node}.'
raise CyclicDependencyException(msg)
for input_idx, upstream_node in input_node.inputs.items():
if not upstream_node:
continue
if upstream_node is node:
msg = f'Unable to set {upstream_node} as input for {node}.'
raise CyclicDependencyException(msg)
verify_dependencies(node, upstream_node)
def verify_type_match(node, input_idx, parent_node):
"""
Verifies that the input_node outputs a type supported by node's input index.
Args:
node (BaseNode): Current node
input_idx (int): Current node's input to be connected to input_node
parent_node (BaseNode): Node to connect current node's input to
Raises:
NodeTypeMismatchException: When input_node does not output a type
supported by node's input index.
"""
if -1 in node.input_types:
input_types = node.input_types[-1]['types']
else:
input_types = node.input_types[input_idx]['types']
if not any(issubclass(parent_node.output_type['type'], t) for t in input_types):
msg = (
f'Input {input_idx} for node {node.name!r} expects type(s) '
f'{", ".join([repr(t.__name__) for t in input_types])}, while node '
f'{parent_node.name!r} only outputs type '
f'{parent_node.output_type["type"].__name__!r}.'
)
raise NodeTypeMismatchException(msg)
def verify_input_index(node, input_idx):
"""
Verifies that input index does not exceed node's max inputs.
Args:
node (BaseNode): Current node
input_idx (int): Current node's input to be connected to input_node
Raises:
MaxInputsExceededException: When input_idx exceeds node's max_inputs
"""
# Verify Index
    if node.max_inputs != -1 and input_idx + 1 > node.max_inputs:
raise MaxInputsExceededException
def verify_connection(node, input_idx, input_node):
"""
Performs various verification checks before allowing node and input_node to
be connected.
Args:
node (BaseNode): Current node
input_idx (int): Current node's input to be connected to input_node
input_node (BaseNode): Node to connect current node's input to
Raises:
CyclicDependencyException: When verification fails
"""
if input_node is None:
return
verify_input_index(node, input_idx)
verify_type_match(node, input_idx, input_node)
verify_dependencies(node, input_node)
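# Hedged usage sketch (added): verify_connection is presumably the entry point the
# node graph calls before wiring an input; the node objects below are hypothetical.
# verify_connection(node=downstream, input_idx=0, input_node=upstream)
# # raises MaxInputsExceededException / NodeTypeMismatchException /
# # CyclicDependencyException when the connection is not allowed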
|
py | b40bd48e1142f5e1ae2ac9575a75c8ecba52c609 | #!/usr/bin/env python3
import numpy as np
import shelve
import isceobj
import copy
import datetime
import os
from imageMath import IML
import logging
#####Helper functions for geobox manipulation
def geoboxToAzrgbox(frame, geobox, israw=False, isnative=False, margin=0.02, zrange=None):
'''
Convert a geo bounding box - SNWE to pixel limits.
'''
from isceobj.Util.Poly2D import Poly2D
from isceobj.Planet.Planet import Planet
from isceobj.Constants import SPEED_OF_LIGHT
if zrange is None:
zrange = [-500., 9000.]
rgs = []
azs = []
combos = [ [geobox[0]-margin, geobox[2]-margin],
[geobox[0]-margin, geobox[3]+margin],
[geobox[1]+margin, geobox[3]-margin],
[geobox[1]+margin, geobox[2]+margin] ]
lookSide = frame.instrument.platform.pointingDirection
planet = Planet(pname='Earth')
wvl = frame.instrument.getRadarWavelength()
if (isnative or israw):
####If geometry is in native doppler / raw
####You need doppler as a function of range to do
####geometry mapping correctly
###Currently doppler is saved as function of pixel number - old ROIPAC style
###Transform to function of slant range
coeff = frame._dopplerVsPixel
doppler = Poly2D()
doppler._meanRange = frame.startingRange
doppler._normRange = frame.instrument.rangePixelSize
doppler.initPoly(azimuthOrder=0, rangeOrder=len(coeff)-1, coeffs=[coeff])
else:
###Zero doppler system
doppler = Poly2D()
doppler.initPoly(azimuthOrder=0, rangeOrder=0, coeffs=[[0.]])
####Do
for z in zrange:
for combo in combos:
try:
taz, rgm = frame.orbit.geo2rdr(combo + [z], side=lookSide,
doppler=doppler, wvl=wvl)
azs.append(taz)
rgs.append(rgm)
except:
pass
if len(azs) <= 1:
raise Exception('Could not map geobbox coordinates to image')
azrgbox = [np.min(azs), np.max(azs), np.min(rgs), np.max(rgs)]
if israw:
####If cropping raw product, need to add an aperture length in range and azimuth
###Extra slant range at near and far range due to the uncompressed pulse
deltaRg = np.abs(frame.instrument.pulseLength * SPEED_OF_LIGHT/2.0)
print('RAW data - adding range aperture (in m) : ', deltaRg)
azrgbox[2] -= deltaRg
azrgbox[3] += deltaRg
###Extra azimuth samples at far range
elp =copy.copy( planet.ellipsoid)
svmid = frame.orbit.interpolateOrbit(frame.sensingMid, method='hermite')
xyz = svmid.getPosition()
vxyz = svmid.getVelocity()
llh = elp.xyz_to_llh(xyz)
heading = frame.orbit.getENUHeading(frame.sensingMid)
print('Heading: ', heading)
elp.setSCH(llh[0], llh[1], heading)
sch, schvel = elp.xyzdot_to_schdot(xyz, vxyz)
vel = np.linalg.norm(schvel)
synthAperture = np.abs(wvl* azrgbox[3]/(frame.instrument.platform.antennaLength*vel))
deltaAz = datetime.timedelta(seconds=synthAperture)
print('RAW data - adding azimuth aperture (in s) : ', synthAperture)
azrgbox[0] -= deltaAz
azrgbox[1] += deltaAz
return azrgbox
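# Hedged usage sketch (added): typical pairing of the two helpers in this module; the
# SNWE bounding box, height range and output path are made-up values.
# limits = geoboxToAzrgbox(frame, [18.9, 19.3, -155.7, -155.1],
#                          israw=False, isnative=False, zrange=[-500., 9000.])
# outframe = cropFrame(frame, limits, 'crop/scene.slc', israw=False)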
def cropFrame(frame, limits, outname, israw=False):
'''
Crop the frame.
Parameters to change:
startingRange
farRange
sensingStart
sensingStop
sensingMid
numberOfLines
numberOfSamples
dopplerVsPixel
'''
outframe = copy.deepcopy(frame)
if not israw:
img = isceobj.createImage()
img.load(frame.image.filename+'.xml')
outframe.image = img
if israw:
factor = 2
else:
factor = 1
####sensing start
ymin = np.floor( (limits[0] - frame.sensingStart).total_seconds() * frame.PRF)
print('Line start: ', ymin)
ymin = np.int( np.clip(ymin, 0, frame.numberOfLines-1))
####sensing stop
ymax = np.ceil( (limits[1] - frame.sensingStart).total_seconds() * frame.PRF) + 1
print('Line stop: ', ymax)
ymax = np.int( np.clip(ymax, 1, frame.numberOfLines))
print('Line limits: ', ymin, ymax)
print('Original Line Limits: ', 0, frame.numberOfLines)
if (ymax-ymin) <= 1:
raise Exception('Azimuth limits appear to not overlap with the scene')
outframe.sensingStart = frame.sensingStart + datetime.timedelta(seconds = ymin/frame.PRF)
outframe.numberOfLines = ymax - ymin
outframe.sensingStop = frame.sensingStop + datetime.timedelta(seconds = (ymax-1)/frame.PRF)
outframe.sensingMid = outframe.sensingStart + 0.5 * (outframe.sensingStop - outframe.sensingStart)
####starting range
xmin = np.floor( (limits[2] - frame.startingRange)/frame.instrument.rangePixelSize)
print('Pixel start: ', xmin)
xmin = np.int(np.clip(xmin, 0, (frame.image.width//factor)-1))
####far range
xmax = np.ceil( (limits[3] - frame.startingRange)/frame.instrument.rangePixelSize)+1
print('Pixel stop: ', xmax)
xmax = np.int(np.clip(xmax, 1, frame.image.width//factor))
print('Pixel limits: ', xmin, xmax)
print('Original Pixel Limits: ', 0, frame.image.width//factor)
if (xmax - xmin) <= 1:
raise Exception('Range limits appear to not overlap with the scene')
outframe.startingRange = frame.startingRange + xmin * frame.instrument.rangePixelSize
outframe.numberOfSamples = (xmax - xmin) * factor
outframe.setFarRange( frame.startingRange + (xmax-xmin-1) * frame.instrument.rangePixelSize)
####Adjust Doppler centroid coefficients
coeff = frame._dopplerVsPixel
rng = np.linspace(xmin, xmax, len(coeff) + 1)
dops = np.polyval(coeff[::-1], rng)
rng = rng - xmin ###Adjust the start
pol = np.polyfit(rng, dops, len(coeff)-1)
outframe._dopplerVsPixel = list(pol[::-1])
####Adjusting the image now
####Can potentially use israw to apply more logic but better to use new version
if frame.image.xmin != 0 :
raise Exception('Looks like you are still using an old version of ISCE. The new version completely strips out the header bytes. Please switch to the latest ...')
inname = frame.image.filename
suffix = os.path.splitext(inname)[1]
outdirname = os.path.dirname(outname)
os.makedirs(outdirname, exist_ok=True)
indata = IML.mmapFromISCE(inname, logging)
indata.bands[0][ymin:ymax,xmin*factor:xmax*factor].tofile(outname)
indata = None
outframe.image.filename = outname
outframe.image.width = outframe.numberOfSamples
outframe.image.length = outframe.numberOfLines
outframe.image.xmax = outframe.numberOfSamples
outframe.image.coord1.coordSize = outframe.numberOfSamples
outframe.image.coord1.coordEnd = outframe.numberOfSamples
outframe.image.coord2.coordSize = outframe.numberOfLines
outframe.image.coord2.coordEnd = outframe.numberOfLines
outframe.image.renderHdr()
return outframe
def runCrop(self, raw=False):
'''
Crop step based on region of interest.
'''
bbox = self.regionOfInterest
if raw:
if self.regionOfInterest is None:
self._insar.masterRawCropProduct = self._insar.masterRawProduct
self._insar.slaveRawCropProduct = self._insar.slaveRawProduct
            print('No region of interest provided, skipping cropping of raw data')
return
###Check if master started at raw
if self._insar.masterRawProduct is None:
self._insar.masterRawCropProduct = self._insar.masterRawProduct
print('Looks like master product is SLC, skipping raw cropping')
else:
frame = self._insar.loadProduct( self._insar.masterRawProduct)
outdir = os.path.splitext(self._insar.masterRawProduct)[0] + '_crop'
outname = os.path.join( outdir, os.path.basename(self.master.output) + '.raw')
limits = geoboxToAzrgbox(frame, self.regionOfInterest,
israw=True, zrange=self.heightRange)
outframe = cropFrame(frame, limits, outname,
israw=True)
self._insar.saveProduct( outframe, outdir + '.xml')
self._insar.masterRawCropProduct = outdir + '.xml'
frame = None
outframe = None
###Check if slave started at raw
if self._insar.slaveRawProduct is None:
self._insar.slaveRawCropProduct = self._insar.slaveRawProduct
print('Looks like slave product is SLC, skipping raw cropping')
else:
frame = self._insar.loadProduct( self._insar.slaveRawProduct)
outdir = os.path.splitext(self._insar.slaveRawProduct)[0] + '_crop'
outname = os.path.join( outdir, os.path.basename(self.slave.output) + '.raw')
limits = geoboxToAzrgbox(frame, self.regionOfInterest,
israw=True, zrange=self.heightRange)
outframe = cropFrame(frame, limits, outname,
israw=True)
self._insar.saveProduct( outframe, outdir + '.xml')
self._insar.slaveRawCropProduct = outdir + '.xml'
frame = None
outframe = None
return
else:
if self.regionOfInterest is None:
self._insar.masterSlcCropProduct = self._insar.masterSlcProduct
self._insar.slaveSlcCropProduct = self._insar.slaveSlcProduct
            print('No region of interest provided, skipping cropping of slc data')
return
###Crop master SLC
frame = self._insar.loadProduct( self._insar.masterSlcProduct)
outdir = os.path.splitext(self._insar.masterSlcProduct)[0] + '_crop'
outname = os.path.join( outdir, os.path.basename(self.master.output) + '.slc')
limits = geoboxToAzrgbox(frame, self.regionOfInterest,
israw=False, isnative=self.insar.masterGeometrySystem.upper().startswith('NATIVE'),
zrange=self.heightRange)
outframe = cropFrame(frame, limits, outname,
israw=False)
self._insar.saveProduct( outframe, outdir + '.xml')
self._insar.masterSlcCropProduct = outdir + '.xml'
frame = None
outframe = None
        ###Crop slave SLC
frame = self._insar.loadProduct( self._insar.slaveSlcProduct)
outdir = os.path.splitext(self._insar.slaveSlcProduct)[0] + '_crop'
outname = os.path.join( outdir, os.path.basename(self.slave.output) + '.slc')
limits = geoboxToAzrgbox(frame, self.regionOfInterest,
israw=False, isnative=self.insar.masterGeometrySystem.upper().startswith('NATIVE'),
zrange=self.heightRange)
outframe = cropFrame(frame, limits, outname,
israw=False)
self._insar.saveProduct( outframe, outdir + '.xml')
self._insar.slaveSlcCropProduct = outdir + '.xml'
frame = None
outframe = None
return
|
py | b40bd502da0b18ee4d867cf09d74cb057d51a613 | """
Contains ConfirmEmail object which constructs an email to send to the related booking
upon a booking being marked confirmed.
"""
from django.core.mail import EmailMessage
class ConfirmEmail(object):
"""
Needs a related Booking model with the booker's details.
"""
def __init__(self, booking):
self.booking = booking
self.body = ""
self.to_email = booking.contact_email
self.subject = f"Confirmed: Booking at {booking.scheduled_at}"
def generate_body(self):
"""
Generates generic email body confirming the booking.
"""
body = f"""
Hi {self.booking.contact_name},
Thank you for booking a tour with us. Your tour has been confirmed
at {self.booking.scheduled_at}. We can confirm that the tour will last
{self.booking.duration} minutes.
Please confirm the rest of the details below:
Tour Information:
- Date: {self.booking.scheduled_at}
- Duration: {self.booking.duration}
- Stops: {self.booking.places_to_visit}
"""
if self.booking.is_group:
body += f"""
Group Information:
- Group name: {self.booking.group_name}
- Group #: {self.booking.group_number}
- Age range: {self.booking.age_group}
- Transportation: {self.booking.transportation}
"""
body += f"""
Extra details:
- {self.booking.extra_details}
We look forward to seeing you!
Regards,
Tour Team
"""
self.body = body
def send(self):
email = EmailMessage(
subject=self.subject,
body=self.body,
to=[self.to_email]
)
email.send()
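# Hedged usage sketch (added): how the class is presumably driven from a view or
# signal handler once a Booking is marked confirmed; `booking` stands for a
# hypothetical confirmed Booking instance.
# email = ConfirmEmail(booking)
# email.generate_body()
# email.send()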
|
py | b40bd51fc86ecfaf768ac0b4803382edc207602d | from setuptools import setup, find_packages
setup(
name='facebook-online-friend-tracker',
version='2.0.1',
description='This tool tracks the number of online friends a user has on Facebook at any given time.',
long_description=open('README.md').read(),
url='https://github.com/bhamodi/facebook-online-friend-tracker',
author='Baraa Hamodi',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Internet :: Log Analysis',
'Topic :: Software Development',
'Topic :: Utilities',
],
keywords='facebook online friend tracker python scraper selenium analysis optimization',
packages=find_packages(),
install_requires=[
# 'chromedriver_installer>=0.0.4',
# 'selenium>=3.0.2',
],
entry_points={
'console_scripts': [
'facebook-online-friend-tracker=src:main',
'update_daily_stats_lucjan=src:take_last_day_and_calculate_daily_stats'
],
},
)
|
py | b40bd595ac989ae6cd646a33a491b377840ecb72 | # Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import datetime
from re import match
from typing import TYPE_CHECKING
from ...exceptions import InvalidUrlError, EmbedFieldError
from ...utils.api_object import APIObject
from ...utils.types import MISSING
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Iterable, Union, Optional
from ...utils.types import APINullable
def _field_size(_field: str) -> int:
"""
The Discord API removes white space
when counting the length of a field.
:param _field:
The field.
:return:
Length of the string without white space.
"""
return 0 if _field == MISSING else len(_field.strip())
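# Illustrative examples (added): _field_size('  hi  ') == 2 and _field_size(MISSING) == 0,
# so fields that were never set do not count towards the embed length limits.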
def _is_valid_url(url: str) -> bool:
"""
Checks whether the url is a proper and valid url.
(matches for http and attachment protocol.
:param url:
The url which must be checked.
:return:
Whether the provided url is valid.
"""
stmt = (
r"(http[s]|attachment)"
r"?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|"
r"(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
return bool(match(stmt, url))
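# Illustrative examples (added): _is_valid_url('https://example.com/image.png') and
# _is_valid_url('attachment://image.png') both return True, while
# _is_valid_url('ftp://example.com/image.png') returns False.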
def _check_if_valid_url(url: str):
"""Checks if the provided url is valid.
Raises
------
:class:`~pincer.exceptions.InvalidUrlError`
if the url didn't match the url regex.
(which means that it was malformed or didn't match the http/attachment
protocol).
"""
if not _is_valid_url(url):
raise InvalidUrlError(
"Url was malformed or wasn't of protocol http(s)/attachment."
)
@dataclass
class EmbedAuthor:
"""Representation of the Embed Author class
Attributes
----------
name: APINullable[:class:`str`]
Name of the author
url: APINullable[:class:`str`]
Url of the author
icon_url: APINullable[:class:`str`]
Url of the author icon
proxy_icon_url: APINullable[:class:`str`]
A proxied url of the author icon
"""
icon_url: APINullable[str] = MISSING
name: APINullable[str] = MISSING
proxy_icon_url: APINullable[str] = MISSING
url: APINullable[str] = MISSING
def __post_init__(self): # stop documenting special methods
if _field_size(self.name) > 256:
raise EmbedFieldError.from_desc("Author name", 256, len(self.name))
_check_if_valid_url(self.url)
@dataclass
class EmbedImage:
"""Representation of the Embed Image class
Attributes
----------
url: APINullable[:class:`str`]
Source url of the image
proxy_url: APINullable[:class:`str`]
A proxied url of the image
height: APINullable[:class:`int`]
Height of the image
width: APINullable[:class:`int`]
Width of the image
"""
url: APINullable[str] = MISSING
proxy_url: APINullable[str] = MISSING
height: APINullable[int] = MISSING
width: APINullable[int] = MISSING
def __post_init__(self):
_check_if_valid_url(self.url)
@dataclass
class EmbedProvider:
"""Representation of the Provider class
Attributes
----------
name: APINullable[:class:`str`]
Name of the provider
url: APINullable[:class:`str`]
Url of the provider
"""
name: APINullable[str] = MISSING
url: APINullable[str] = MISSING
@dataclass
class EmbedThumbnail:
"""Representation of the Embed Thumbnail class
Attributes
----------
url: APINullable[:class:`str`]
Source url of the thumbnail
proxy_url: APINullable[:class:`str`]
A proxied url of the thumbnail
height: APINullable[:class:`int`]
Height of the thumbnail
width: APINullable[:class:`int`]
Width of the thumbnail
"""
url: APINullable[str] = MISSING
proxy_url: APINullable[str] = MISSING
height: APINullable[int] = MISSING
width: APINullable[int] = MISSING
def __post_init__(self):
_check_if_valid_url(self.url)
@dataclass
class EmbedVideo:
"""Representation of the Embed Video class
Attributes
----------
url: APINullable[:class:`str`]
Source url of the video
proxy_url: APINullable[:class:`str`]
A proxied url of the video
height: APINullable[:class:`int`]
Height of the video
width: APINullable[:class:`int`]
Width of the video
"""
height: APINullable[int] = MISSING
url: APINullable[str] = MISSING
proxy_url: APINullable[str] = MISSING
width: APINullable[int] = MISSING
@dataclass
class EmbedFooter:
"""Representation of the Embed Footer class
Attributes
----------
text: :class:`str`
Footer text
icon_url: APINullable[:class:`str`]
Url of the footer icon
proxy_icon_url: APINullable[:class:`str`]
A proxied url of the footer icon
Raises
------
EmbedFieldError:
Text is longer than 2048 characters
"""
text: str
icon_url: APINullable[str] = MISSING
proxy_icon_url: APINullable[str] = MISSING
def __post_init__(self):
if _field_size(self.text) > 2048:
raise EmbedFieldError.from_desc(
"Footer text", 2048, len(self.text)
)
@dataclass
class EmbedField:
"""Representation of the Embed Field class
Attributes
----------
name: :class:`str`
The name of the field
value: :class:`str`
The text in the field
inline: APINullable[:class:`bool`]
Whether or not this field should display inline
Raises
------
EmbedFieldError:
Name is longer than 256 characters
EmbedFieldError:
Description is longer than 1024 characters
"""
name: str
value: str
inline: APINullable[bool] = MISSING
def __post_init__(self):
if _field_size(self.name) > 256:
raise EmbedFieldError.from_desc(
"Field name", 256, len(self.name)
)
if _field_size(self.value) > 1024:
raise EmbedFieldError.from_desc(
"Field value", 1024, len(self.value)
)
# TODO: Handle Bad Request if embed that is too big is sent
# https://discord.com/developers/docs/resources/channel#embed-limits
# Currently ignored since I don't think it would make sense to put
# This with the Embed class
@dataclass
class Embed(APIObject):
"""Representation of the discord Embed class
Attributes
----------
title: APINullable[:class:`str`]
Embed title.
description: APINullable[:class:`str`]
Embed description.
color: APINullable[:class:`int`]
Embed color code.
fields: List[:class:`~pincer.objects.message.embed.EmbedField`]
Fields information.
footer: APINullable[:class:`~pincer.objects.message.embed.EmbedFooter`]
Footer information.
image: APINullable[:class:`~pincer.objects.message.embed.EmbedImage`]
Image information.
provider: APINullable[:class:`~pincer.objects.message.embed.EmbedProvider`]
Provider information.
thumbnail: APINullable[:class:`~pincer.objects.message.embed.EmbedThumbnail`]
Thumbnail information.
timestamp: APINullable[:class:`str`]
Timestamp of embed content in ISO format.
url: APINullable[:class:`str`]
Embed url.
video: APINullable[:class:`~pincer.objects.message.embed.EmbedVideo`]
Video information.
type: APINullable[:class:`int`]
type of message
"""
# noqa: E501
title: APINullable[str] = MISSING
description: APINullable[str] = MISSING
color: APINullable[int] = MISSING
fields: list[EmbedField] = field(default_factory=list)
footer: APINullable[EmbedFooter] = MISSING
image: APINullable[EmbedImage] = MISSING
provider: APINullable[EmbedProvider] = MISSING
thumbnail: APINullable[EmbedThumbnail] = MISSING
timestamp: APINullable[str] = MISSING
author: APINullable[EmbedAuthor] = MISSING
url: APINullable[str] = MISSING
video: APINullable[EmbedVideo] = MISSING
type: APINullable[int] = MISSING
def __post_init__(self):
if _field_size(self.title) > 256:
raise EmbedFieldError.from_desc(
"Embed title", 256, len(self.title)
)
if _field_size(self.description) > 4096:
raise EmbedFieldError.from_desc(
"Embed description", 4096, len(self.description)
)
if len(self.fields) > 25:
raise EmbedFieldError.from_desc(
"Embed field", 25, len(self.fields)
)
def set_timestamp(self, time: datetime) -> Embed:
"""Discord uses iso format for time stamps.
This function will set the time to that format.
Parameters
----------
time : :class:`datetime.datetime`
The datetime to set the timestamp to.
Returns
-------
:class:`~pincer.objects.message.embed.Embed`
The new embed object.
"""
self.timestamp = time.isoformat()
return self
def set_author(
self,
icon_url: APINullable[str] = MISSING,
name: APINullable[str] = MISSING,
proxy_icon_url: APINullable[str] = MISSING,
url: APINullable[str] = MISSING
) -> Embed:
"""Set the author message for the embed. This is the top
field of the embed.
Parameters
----------
icon_url: APINullable[:class:`str`]
The icon which will be next to the author name.
name: APINullable[:class:`str`]
The name for the author (so the message).
proxy_icon_url: APINullable[:class:`str`]
A proxied url of the author icon.
url: APINullable[:class:`str`]
The url for the author name, this will make the
name field a link/url.
Returns
-------
:class:`~pincer.objects.message.embed.Embed`
The new embed object.
"""
self.author = EmbedAuthor(
icon_url=icon_url,
name=name,
proxy_icon_url=proxy_icon_url,
url=url
)
return self
def set_image(
self,
url: APINullable[str] = MISSING,
proxy_url: APINullable[str] = MISSING,
height: APINullable[int] = MISSING,
width: APINullable[int] = MISSING
) -> Embed:
"""Sets an image for your embed.
Parameters
----------
url: APINullable[:class:`str`]
Source url of the video
proxy_url: APINullable[:class:`str`]
A proxied url of the video
height: APINullable[:class:`int`]
Height of the video
width: APINullable[:class:`int`]
Width of the video
Returns
-------
:class:`~pincer.objects.message.embed.Embed`
The new embed object.
"""
self.image = EmbedImage(
height=height,
url=url,
proxy_url=proxy_url,
width=width
)
return self
def set_thumbnail(
self,
height: APINullable[int] = MISSING,
url: APINullable[str] = MISSING,
proxy_url: APINullable[str] = MISSING,
width: APINullable[int] = MISSING
) -> Embed: # ? its normally smaller in the corner?
"""Sets the thumbnail of the embed.
This image is bigger than the ``image`` property.
        Parameters
        ----------
        url: APINullable[:class:`str`]
Source url of the video
proxy_url: APINullable[:class:`str`]
A proxied url of the video
height: APINullable[:class:`int`]
Height of the video
width: APINullable[:class:`int`]
Width of the video
Returns
-------
:class:`~pincer.objects.message.embed.Embed`
The new embed object.
"""
self.thumbnail = EmbedThumbnail(
height=height,
url=url,
proxy_url=proxy_url,
width=width
)
return self
def set_footer(
self,
text: str,
icon_url: APINullable[str] = MISSING,
proxy_icon_url: APINullable[str] = MISSING
) -> Embed:
"""
Sets the embed footer. This is at the bottom of your embed.
Parameters
----------
text: :class:`str`
Footer text
icon_url: APINullable[:class:`str`]
Url of the footer icon
proxy_icon_url: APINullable[:class:`str`]
A proxied url of the footer icon
Returns
-------
:class:`~pincer.objects.message.embed.Embed`
The new embed object.
"""
self.footer = EmbedFooter(
text=text,
icon_url=icon_url,
proxy_icon_url=proxy_icon_url
)
return self
def add_field(
self,
name: str,
value: str,
inline: APINullable[bool] = MISSING
) -> Embed:
"""Adds a field to the embed.
An embed can contain up to 25 fields.
Parameters
----------
name: :class:`str`
The name of the field
value: :class:`str`
The text in the field
inline: APINullable[:class:`bool`]
Whether or not this field should display inline
Raises
------
EmbedFieldError:
Raised when there are more than 25 fields in the embed
"""
_field = EmbedField(
name=name,
value=value,
inline=inline
)
if len(self.fields) > 25:
raise EmbedFieldError.from_desc(
"Embed field", 25, len(self.fields) + 1
)
self.fields += [_field]
return self
def add_fields(
self,
field_list: Union[Dict[Any, Any], Iterable[Iterable[Any, Any]]],
checks: Optional[Callable[[Any], Any]] = bool,
map_title: Optional[Callable[[Any], str]] = str,
map_values: Optional[Callable[[Any], str]] = str,
inline: bool = True
) -> Embed:
"""Add multiple fields from a list,
dict or generator of fields with possible mapping.
Parameters
----------
field_list: Union[Dict[Any, Any], Iterable[Iterable[Any, Any]]]
A iterable or generator of the fields to add.
If the field_list type is a dictionary, will take items.
checks: Optional[Callable[[Any], Any]]
A filter function to remove embed fields.
map_title: Optional[Callable[[Any], :class:`str`]]
A transform function to change the titles.
map_values: Optional[Callable[[Any], :class:`str`]]
A transform function to change the values.
inline: :class:`bool`
Whether to create grid or each field on a new line.
Raises
------
EmbedFieldError:
Raised when there are more than 25 fields in the embed
Returns
-------
:class:`~pincer.objects.message.embed.Embed`
The new embed object.
"""
if isinstance(field_list, dict):
field_list: Iterable[Iterable[Any, Any]] = field_list.items()
for field_name, field_value in field_list:
val = (
map_values(field_value)
if not isinstance(field_value, tuple)
else map_values(*field_value)
)
if checks(val):
self.add_field(
name=map_title(field_name),
value=val,
inline=inline
)
return self
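# Hedged usage sketch (added): chaining the builder-style helpers defined above; the
# title, colour and field values are made-up examples.
# embed = (
#     Embed(title="Server stats", description="Daily summary", color=0x5865F2)
#     .set_timestamp(datetime.now())
#     .set_footer(text="Generated by the stats task")
#     .add_fields({"members": 1024, "online": 256})
# )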
|
py | b40bd6e14d602d10bdd138855f9375b23b5455d4 | '''
code adapted from PyTorch examples
'''
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from torch.nn import functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models.deterministic.resnet_large as resnet
from torchsummary import summary
from utils import util
import csv
import numpy as np
from utils.util import get_rho
from torch.utils.tensorboard import SummaryWriter
torchvision.set_image_backend('accimage')
model_names = sorted(
name for name in resnet.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("resnet") and callable(resnet.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data',
metavar='DIR',
default='data/imagenet',
help='path to dataset')
parser.add_argument('-a',
'--arch',
metavar='ARCH',
default='resnet50',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j',
'--workers',
default=8,
type=int,
metavar='N',
                    help='number of data loading workers (default: 8)')
parser.add_argument('--epochs',
default=90,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--val_batch_size', default=1000, type=int)
parser.add_argument('-b',
'--batch-size',
default=32,
type=int,
metavar='N',
                    help='mini-batch size (default: 32), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr',
'--learning-rate',
default=0.001,
type=float,
metavar='LR',
help='initial learning rate',
dest='lr')
parser.add_argument('--momentum',
default=0.9,
type=float,
metavar='M',
help='momentum')
parser.add_argument('--wd',
'--weight-decay',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p',
'--print-freq',
default=10,
type=int,
metavar='N',
help='print frequency (default: 10)')
parser.add_argument('--resume',
default='',
type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained',
dest='pretrained',
action='store_true',
default=True,
help='use pre-trained model')
parser.add_argument('--world-size',
default=-1,
type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank',
default=-1,
type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url',
default='tcp://224.66.41.62:23456',
type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend',
default='nccl',
type=str,
help='distributed backend')
parser.add_argument('--seed',
default=None,
type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed',
action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--mode', type=str, required=True, help='train | test')
parser.add_argument('--save-dir',
dest='save_dir',
help='The directory used to save the trained models',
default='./checkpoint/deterministic',
type=str)
parser.add_argument(
'--tensorboard',
type=bool,
default=True,
metavar='N',
help='use tensorboard for logging and visualization of training progress')
parser.add_argument(
'--log_dir',
type=str,
default='./logs/imagenet/deterministic',
metavar='N',
help='use tensorboard for logging and visualization of training progress')
best_acc1 = 0
len_trainset = 1281167
len_valset = 50000
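# Example invocations for the flags defined above (the dataset path and script
# name are placeholders, not values taken from this repository):
#   python <this_script>.py /data/imagenet --arch resnet50 --mode train
#   python <this_script>.py /data/imagenet --arch resnet50 --mode test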
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
if torch.cuda.is_available():
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker,
nprocs=ngpus_per_node,
args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
model = torch.nn.DataParallel(resnet.__dict__[args.arch](pretrained=True))
if torch.cuda.is_available():
model.cuda()
else:
model.cpu()
# define loss function (criterion) and optimizer
if torch.cuda.is_available():
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
else:
criterion = nn.CrossEntropyLoss().cpu()
optimizer = torch.optim.SGD(model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
tb_writer = None
if args.tensorboard:
logger_dir = os.path.join(args.log_dir, 'tb_logger')
if not os.path.exists(logger_dir):
os.makedirs(logger_dir)
tb_writer = SummaryWriter(logger_dir)
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
val_dataset = datasets.ImageFolder(
valdir,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
print('len trainset: ', len(train_dataset))
print('len valset: ', len(val_dataset))
len_trainset = len(train_dataset)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset)
else:
train_sampler = None
    # Use the DistributedSampler when it exists; otherwise shuffle locally.
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               sampler=train_sampler,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=args.val_batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True)
if args.evaluate:
        validate(val_loader, model, criterion, args.start_epoch, args, tb_writer)
return
if args.mode == 'train':
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args,
tb_writer)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, epoch, args,
tb_writer)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if is_best:
save_checkpoint(
{
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
},
is_best,
filename=os.path.join(args.save_dir,
'{}_imagenet.pth'.format(args.arch)))
elif args.mode == 'test':
checkpoint_file = args.save_dir + '/{}_imagenet.pth'.format(args.arch)
if os.path.exists(checkpoint_file):
checkpoint = torch.load(checkpoint_file)
print('load checkpoint.')
model.load_state_dict(checkpoint['state_dict'])
#Evaluate on test dataset
test_acc = evaluate(model, val_loader, args)
print('******Test data***********\n')
print('test_acc: ', test_acc)
def train(train_loader, model, criterion, optimizer, epoch, args, tb_writer):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
global opt_th
progress = ProgressMeter(len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.mean().backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
if tb_writer is not None:
tb_writer.add_scalar('train/loss', loss.item(), epoch)
tb_writer.add_scalar('train/accuracy', acc1.item(), epoch)
tb_writer.flush()
def validate(val_loader, model, criterion, epoch, args, tb_writer):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
preds_list = []
labels_list = []
unc_list = []
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
top5=top5))
return top1.avg
def evaluate(model, val_loader, args):
pred_probs_mc = []
test_loss = 0
correct = 0
with torch.no_grad():
output_list = []
label_list = []
model.eval()
begin = time.time()
for batch_idx, (data, target) in enumerate(val_loader):
#print('Batch idx {}, data shape {}, target shape {}'.format(batch_idx, data.shape, target.shape))
if torch.cuda.is_available():
data, target = data.cuda(non_blocking=True), target.cuda(
non_blocking=True)
            else:
                # DataLoader tensors already live on the CPU, so no transfer
                # arguments are needed here.
                data, target = data.cpu(), target.cpu()
output = model.forward(data)
pred_probs = torch.nn.functional.softmax(output, dim=1)
output_list.append(pred_probs)
label_list.append(target)
end = time.time()
print("inference throughput: ", len_valset / (end - begin),
" images/s")
        # The tensors are converted to numpy on the CPU just below, so avoid the
        # extra .cuda() round-trip (which would also fail on CPU-only machines).
        labels = torch.cat(label_list)
        probs = torch.cat(output_list)
target_labels = labels.data.cpu().numpy()
pred_mean = probs.data.cpu().numpy()
Y_pred = np.argmax(pred_mean, axis=1)
test_acc = (Y_pred == target_labels).mean()
print('Test accuracy:', test_acc * 100)
np.save(args.log_dir + '/deterministic_imagenet_probs.npy', pred_mean)
np.save(args.log_dir + '/deterministic_imagenet_labels.npy',
target_labels)
return test_acc
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
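    # Worked example: _get_batch_fmtstr(5005) returns '[{:4d}/5005]', so
    # display(42) prints lines starting with e.g. 'Test: [  42/5005]'.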
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1**(epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
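# With the default --lr 0.001 the step decay above yields:
#   epochs 0-29: 1e-3, epochs 30-59: 1e-4, epochs 60-89: 1e-5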
def accuracy(output, target, topk=(1, )):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            # reshape() handles the non-contiguous slice of the transposed tensor
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
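# Illustrative expectation: for a batch of 4 samples where 3 top-1 predictions
# and all top-5 predictions are correct, accuracy(output, target, topk=(1, 5))
# returns [tensor([75.]), tensor([100.])].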
if __name__ == '__main__':
main()
|
py | b40bd7982e22e79153abf01f69f87dfc76395f9c | from tests.conftest import * # NOQA
from tests.data_source.conftest import * # NOQA
|
py | b40bd7f40b762a6157684fead756ec914186f88e | import requests
from urllib3.util import Timeout
from datadog_checks.base.checks import AgentCheck
from datadog_checks.base.errors import CheckException
class Neo4jCheck(AgentCheck):
SERVICE_CHECK_NAME = 'neo4j.can_connect'
# Neo4j metrics to send
keys = set(
[
'storecreationdate',
'storelogversion',
'kernelstarttime',
'lastcommittedtxid',
'peaknumberofconcurrenttransactions',
'numberofrolledbacktransactions',
'numberofopentransactions',
'numberofopenedtransactions',
'numberofcommittedtransactions',
'logicallogsize',
'propertystoresize',
'arraystoresize',
'totalstoresize',
'relationshipstoresize',
'stringstoresize',
'nodestoresize',
'locks',
'numberofaverteddeadlocks',
'numberofrelationshipidsinuse',
'numberofpropertyidsinuse',
'numberofnodeidsinuse',
'numberofrelationshiptypeidsinuse',
'memorypools',
'pins',
'evictions',
'byteswritten',
'filemappings',
'fileunmappings',
'bytesread',
'flushes',
'evictionexceptions',
'faults',
'ha.pull_interval',
'dbms.memory.pagecache.size',
]
)
display = {
'storecreationdate': 'neo4j.store.creationdate',
'storelogversion': 'neo4j.store.log.version',
'kernelstarttime': 'neo4j.kernel.starttime',
'lastcommittedtxid': 'neo4j.last.committed.transaction.id',
'peaknumberofconcurrenttransactions': 'neo4j.peak.concurrent.transactions',
'numberofrolledbacktransactions': 'neo4j.peak.rolledback.transactions',
'numberofopentransactions': 'neo4j.open.transactions',
'numberofopenedtransactions': 'neo4j.opened.transactions',
'numberofcommittedtransactions': 'neo4j.committed.transactions',
'logicallogsize': 'neo4j.logicallog.size',
'propertystoresize': 'neo4j.property.store.size',
'arraystoresize': 'neo4j.array.store.size',
'totalstoresize': 'neo4j.total.store.size',
'relationshipstoresize': 'neo4j.relationship.store.size',
'stringstoresize': 'neo4j.string.store.size',
'nodestoresize': 'neo4j.node.store.size',
'locks': 'neo4j.locks',
'numberofaverteddeadlocks': 'neo4j.adverted.locks',
'numberofrelationshipidsinuse': 'neo4j.relationship.ids.inuse',
'numberofpropertyidsinuse': 'neo4j.property.ids.inuse',
'numberofnodeidsinuse': 'neo4j.node.ids.inuse',
'numberofrelationshiptypeidsinuse': 'neo4j.relationshiptype.ids.inuse',
'memorypools': 'neo4j.memory.pools',
'pins': 'neo4j.page.cache.pins',
'evictions': 'neo4j.page.cache.evictions',
'byteswritten': 'neo4j.bytes.written',
'filemappings': 'neo4j.page.cache.file.mappings',
'fileunmappings': 'neo4j.page.cache.file.unmappings',
'bytesread': 'neo4j.bytes.read',
'flushes': 'neo4j.page.cache.flushes',
'evictionexceptions': 'neo4j.page.cache.eviction.exceptions',
'faults': 'neo4j.page.cache.faults',
'ha.pull_interval': 'neo4j.ha.pull_interval',
'dbms.memory.pagecache.size': 'neo4j.dbms.memory.pagecache.size',
}
def check(self, instance):
host, port, user, password, timeout, server_name = self._get_config(instance)
tags = instance.get('tags', [])
tags.append('server_name:{}'.format(server_name))
service_check_tags = tags + ['url:{}'.format(host)]
auth = (user, password)
# Neo specific
# Create payload using built-in Neo4j queryJmx stored procedure
payload = {
"statements": [
{
"statement": "CALL dbms.queryJmx('org.neo4j:*') yield attributes with "
"keys(attributes) as k, attributes unwind k as "
"row return row, attributes[row]['value'];"
}
]
}
try:
version = self._get_version(host, port, timeout, auth, service_check_tags)
if version > 2:
check_url = "{}:{}/db/data/transaction/commit".format(host, port)
else:
check_url = "{}:{}/v1/service/metrics".format(host, port)
r = requests.post(check_url, auth=auth, json=payload, timeout=timeout)
except Exception as e:
msg = "Unable to fetch Neo4j stats: {}".format(e)
self._critical_service_check(service_check_tags, msg)
raise CheckException(msg)
if r.status_code != 200:
msg = "Unexpected status of {0} when fetching Neo4j stats, response: {1}"
msg = msg.format(r.status_code, r.text)
self._critical_service_check(service_check_tags, msg)
r.raise_for_status()
stats = r.json()
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
for doc in stats['results'][0]['data']:
name = doc['row'][0].lower()
if name in self.keys:
try:
self.gauge(self.display.get(name, ""), doc['row'][1], tags=tags)
except TypeError:
continue
except ValueError:
continue
def _get_config(self, instance):
host = instance.get('neo4j_url', '')
port = int(instance.get('port', 7474))
user = instance.get('user', '')
password = str(instance.get('password', ''))
connect_timeout = instance.get('connect_timeout')
server_name = instance.get('server_name', '')
timeout = None
if connect_timeout:
timeout = Timeout(connect=connect_timeout)
return host, port, user, password, timeout, server_name
def _get_version(self, host, port, timeout, auth, service_check_tags):
version_url = '{}:{}/db/data/'.format(host, port)
headers_sent = {'Content-Type': 'application/json'}
r = requests.get(version_url, auth=auth, headers=headers_sent, timeout=timeout)
if r.status_code != 200:
msg = "unexpected status of {0} when fetching Neo4j stats, response: {1}"
msg = msg.format(r.status_code, r.text)
self._critical_service_check(service_check_tags, msg)
r.raise_for_status()
stats = r.json()
version = stats.get('neo4j_version')
self.log.debug("Neo4j version: %s", version)
version = version.split('.')
if version:
return int(version[0])
return 0
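    # Example: a /db/data/ payload of {'neo4j_version': '3.5.14'} yields 3 here,
    # so metrics are then fetched from the transactional commit endpoint above.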
def _critical_service_check(self, service_check_tags, message):
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=message)
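# Hedged sketch of an Agent instance config as consumed by _get_config() above;
# the URL, credentials, timeout and tags are placeholders for a local test
# setup, not values from a real deployment.
_EXAMPLE_INSTANCE = {
    'neo4j_url': 'http://localhost',
    'port': 7474,
    'user': 'neo4j',
    'password': 'secret',
    'connect_timeout': 5,
    'server_name': 'neo4j-local',
    'tags': ['env:dev'],
}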
|
py | b40bd804d3451a7dff8b6d5f80920b5fcd59fb01 | import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="line", parent_name="parcats", **kwargs):
super(LineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Line"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `line.colorscale`. Has an effect
                only if `line.color` is set to a numerical
array. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette
will be chosen according to whether numbers in
the `color` array are all positive, all
negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `line.color`) or the bounds set in
                `line.cmin` and `line.cmax`. Has an effect only
                if `line.color` is set to a numerical array.
Defaults to `false` when `line.cmin` and
`line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
                an effect only if `line.color` is set to a
numerical array. Value should have the same
units as in `line.color` and if set,
`line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `line.cmin` and/or `line.cmax` to be
equidistant to this point. Has an effect only
                if `line.color` is set to a numerical array.
Value should have the same units as in
`line.color`. Has no effect when `line.cauto`
is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `line.color`is set to a
numerical array. Value should have the same
units as in `line.color` and if set,
`line.cmax` must be set as well.
color
                Sets the line color. It accepts either a specific
color or an array of numbers that are mapped to
the colorscale relative to the max and min
values of the array or relative to `line.cmin`
and `line.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
                :class:`plotly.graph_objects.parcats.line.ColorBar`
                instance or dict with compatible properties
colorscale
                Sets the colorscale. Has an effect only if
                `line.color` is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`line.cmin` and `line.cmax`.
Alternatively, `colorscale` may be a palette
                name string of the following list: Greys,
                YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,
                Picnic,Rainbow,Portland,Jet,Hot,Blackbody,
                Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-time-
format#locale_format for details on the date
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event
data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
                ones that are `arrayOk: true`) are available, as
                are variables `count` and `probability`. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
reversescale
Reverses the color mapping if true. Has an
                effect only if `line.color` is set to a
numerical array. If true, `line.cmin` will
correspond to the last color in the array and
`line.cmax` will correspond to the first color.
shape
Sets the shape of the paths. If `linear`, paths
are composed of straight lines. If `hspline`,
                paths are composed of horizontal curved splines.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
                `line.color` is set to a numerical array.
""",
),
**kwargs
)
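# Hedged sketch of the trace attributes this validator documents, using plotly's
# public graph_objects API; the dimension values and color data are
# illustrative only and the function is never called here.
def _example_parcats_line_trace():
    import plotly.graph_objects as go
    return go.Parcats(
        dimensions=[
            {"label": "Size", "values": ["S", "M", "L", "M"]},
            {"label": "Colour", "values": ["red", "blue", "blue", "red"]},
        ],
        line={"color": [0, 1, 1, 0], "colorscale": "Viridis", "shape": "hspline"},
    )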
|