repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
Mariaanisimova/pythonintask | PINp/2014/TsivunchikDVnew/task_7_25.py | 1 | 1988 | # Task 7. Variant 25.
# Develop a scoring system for the game from task 6 in which the player
# earns more points the fewer attempts they use.
# Tsivunchik Denis Vladimirovich
# 27.05.2016
import random
guessesTaken = 0
print("Hi, guess one of the four oceans of Earth.")
x = random.choice(["Arctic", "Atlantic", "Indian", "Pacific"])
while guessesTaken < 10:
print("I have picked one: ")
guess = input()
guessesTaken = guessesTaken + 1
if guess != x:
print("Incorrect.")
if guess == x:
break
if guess == x:
print("Correct! Number of attempts:", guessesTaken)
# The score rewards fewer attempts: each extra attempt costs 10 points (110 - 10 * guessesTaken).
if guessesTaken == 1:
print("Points earned:", 100)
if guessesTaken == 2:
print("Points earned:", 90)
if guessesTaken == 3:
print("Points earned:", 80)
if guessesTaken == 4:
print("Points earned:", 70)
if guessesTaken == 5:
print("Points earned:", 60)
if guessesTaken == 6:
print("Points earned:", 50)
if guessesTaken == 7:
print("Points earned:", 40)
if guessesTaken == 8:
print("Points earned:", 30)
if guessesTaken == 9:
print("Points earned:", 20)
if guessesTaken == 10:
print("Points earned:", 10)
if guess != x:
print("Out of attempts.")
print("You earned no points.")
input("\n\nPress Enter to exit.")
| apache-2.0 | 5,398,708,426,895,249,000 | 27.807692 | 80 | 0.605474 | false |
john-tornblom/llvm-p86 | llvm_p86/symtab.py | 1 | 14287 | # encoding: utf-8
# Copyright (C) 2013 John Törnblom
#
# This file is part of LLVM-P86.
#
# LLVM-P86 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LLVM-P86 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LLVM-P86. If not, see <http://www.gnu.org/licenses/>.
'''
Symbol table for Pascal-86.
'''
import math
import sys
class SymtabException(Exception):
pass
class Type(object):
def __init__(self, identifier=None):
self.identifier = identifier
self.handle = None
@property
def id(self):
return self.identifier
def __eq__(self, obj):
if isinstance(obj, Type):
return self.id == obj.id
return False
def __ne__(self, obj):
if isinstance(obj, Type):
return self.id != obj.id
return True
def __str__(self):
return str(self.id)
def _assert_is_type(ty):
if not isinstance(ty, Type):
raise SymtabException("Invalid type '%s'", type(ty))
# abstract class
class IntType(Type):
def __init__(self, lo, hi, width, val=None):
Type.__init__(self, "p86.int[%d]" % width)
if width <= 0:
raise SymtabException('Invalid integer width %d', width)
self.lo = lo
self.hi = hi
self.width = width
self.value = val
@property
def signed(self):
return self.lo < 0
@property
def unsigned(self):
return self.lo >= 0
class UIntType(IntType):
def __init__(self, width, val=None):
lo = 0
hi = (2 ** width) - 1
IntType.__init__(self, lo, hi, width, val)
Type.__init__(self, "p86.uint[%d]" % width)
class SIntType(IntType):
def __init__(self, width, val=None):
lo = -(2 ** (width - 1))
hi = (2 ** (width - 1)) - 1
IntType.__init__(self, lo, hi, width, val)
Type.__init__(self, "p86.sint[%d]" % width)
class IntRangeType(IntType):
def __init__(self, lo, hi, width=None):
lo = int(lo)
hi = int(hi)
lo_ = min(lo, hi)
hi_ = max(lo, hi)
if not width:
num = max(abs(lo_), abs(hi_))
signed = (lo_ < 0)
if num > (2 ** 16 - signed) - 1:
width = 32
elif num > (2 ** 8 - signed) - 1:
width = 16
else:
width = 8
IntType.__init__(self, lo_, hi_, width)
Type.__init__(self, "p86.range[%d..%d]" % (lo_, hi_))
class EnumType(IntType):
def __init__(self, names, width=None):
assert len(names) > 0
self.names = names
lo = 0
hi = len(names) - 1
if not width:
if hi > (2 ** 16) - 1:
width = 32
elif hi > (2 ** 8) - 1:
width = 16
else:
width = 8
IntType.__init__(self, lo, hi, width)
Type.__init__(self, "p86.enum[%d..%d]" % (lo, hi))
class BoolType(IntType):
def __init__(self, val=None):
lo = 0
hi = 1
width = 1
IntType.__init__(self, lo, hi, width, val)
Type.__init__(self, "p86.bool")
class CharType(Type):
def __init__(self, val=None):
self.hi = 255
self.lo = 0
self.width = 8
self.value = None
self.signed = False
self.unsigned = True
self.value = val
Type.__init__(self, "p86.char")
class CharRangeType(CharType):
def __init__(self, lo, hi):
self.lo = ord(lo)
self.hi = ord(hi)
self.width = 8
self.signed = False
self.unsigned = True
Type.__init__(self, "p86.range[%c..%c]" % (self.lo, self.hi))
class RealType(Type):
def __init__(self, width=32):
self.width = width
Type.__init__(self, "p86.real[%d]" % width)
class FloatType(RealType):
def __init__(self):
RealType.__init__(self, 32)
class DoubleType(RealType):
def __init__(self):
RealType.__init__(self, 64)
class TempRealType(RealType):
def __init__(self):
RealType.__init__(self, 80)
class NamedType(Type):
def __init__(self, name):
self.name = name
Type.__init__(self, name)
class DeferredType(NamedType):
# Type required when named types are used
# before being defined.
def __init__(self, name):
NamedType.__init__(self, name)
@property
def id(self):
return "p86.deferred[%s]" % self.name
class ArrayType(Type):
def __init__(self, element_ty, range_ty):
_assert_is_type(element_ty)
_assert_is_type(range_ty)
self.element = element_ty
self.range = range_ty
Type.__init__(self)
@property
def id(self):
return "p86.array[%d..%d] of %s" % (self.range.lo, self.range.hi,
self.element)
@property
def width(self):
return self.element.width * self.length
@property
def length(self):
return self.range.hi - self.range.lo + 1
class StringType(ArrayType):
def __init__(self, length):
element_ty = CharType()
range_ty = IntRangeType(0, length - 1)
ArrayType.__init__(self, element_ty, range_ty)
class SetType(Type):
def __init__(self, element_ty):
_assert_is_type(element_ty)
self.element = element_ty
Type.__init__(self)
@property
def id(self):
return "p86.set of %s" % self.element
@property
def width(self):
return 2 ** self.element.width
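# A set value is modelled as a bit vector with one bit per possible element
# value, hence width = 2 ** element.width (e.g. a set of char occupies 256 bits).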
class EmptySetType(Type):
def __init__(self):
self.value = 0
Type.__init__(self, "p86.emptyset")
class VoidType(Type):
def __init__(self):
Type.__init__(self, "p86.void")
class AnyType(Type):
def __init__(self):
Type.__init__(self, "p86.any")
class ReferenceType(Type):
def __init__(self, referee_ty):
_assert_is_type(referee_ty)
self.referee = referee_ty
Type.__init__(self)
@property
def id(self):
return "p86.reference of %s" % self.referee
class PointerType(Type):
def __init__(self, pointee_ty):
_assert_is_type(pointee_ty)
self.pointee = pointee_ty
Type.__init__(self)
@property
def id(self):
return "p86.pointer to %s" % self.pointee
@property
def width(self):
return math.log(sys.maxsize, 2) + 1
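# math.log(sys.maxsize, 2) + 1 evaluates to the host word size (64.0 on a
# 64-bit interpreter), which is used here as the pointer width in bits.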
class FunctionType(NamedType):
def __init__(self, module, name, ret_ty=VoidType(), scope_level=0):
_assert_is_type(ret_ty)
self.ret = ret_ty
self.params = list()
self.namespace = module + '.' + name
self.scope_level = scope_level
self.scope_hook = None
NamedType.__init__(self, name)
@property
def id(self):
"p86.function[%s]" % self.namespace
class ParameterType(NamedType):
def __init__(self, name, ty):
_assert_is_type(ty)
self.type = ty
NamedType.__init__(self, name)
@property
def id(self):
return "p86.param of %s" % self.type
@property
def width(self):
return self.type.width
class RecordType(NamedType):
def __init__(self, name):
self.fields = list()
self.variant = None
NamedType.__init__(self, name)
@property
def id(self):
return "p86.record[%s]" % self.name
@property
def width(self):
return sum([x.width for x in self.fields])
class VariantType(NamedType):
def __init__(self, name):
self.cases = list()
self.selector = None
NamedType.__init__(self, name)
@property
def id(self):
return "p86.variant[%s]" % self.name
@property
def largest(self):
ty = None
for case in self.cases:
if not ty:
ty = case
elif ty.width < case.width:
ty = case
return ty
@property
def width(self):
return self.largest.width
class FieldType(NamedType):
def __init__(self, name, ty):
_assert_is_type(ty)
self.type = ty
self.index = None
NamedType.__init__(self, name)
@property
def id(self):
return "p86.field[%s]" % self.name
@property
def width(self):
return self.type.width
class ScopeHookType(NamedType):
def __init__(self, name):
self.fields = list()
NamedType.__init__(self, name)
@property
def id(self):
return "p86.scope_hook[%s]" % self.name
class ScopeFieldType(FieldType):
def __init__(self, name, ty):
_assert_is_type(ty)
self.type = ty
self.index = None
NamedType.__init__(self, name)
@property
def id(self):
return "p86.scope_field[%s]" % self.name
@property
def width(self):
return self.type.width
class FileType(Type):
def __init__(self, component_ty):
_assert_is_type(component_ty)
self.component_ty = component_ty
Type.__init__(self)
@property
def id(self):
return "p86.file of %s" % self.component
def _assert_is_value(value):
if not isinstance(value, Value):
raise SymtabException("Invalid value '%s'", value)
class Value(object):
def __init__(self, handle, ty):
_assert_is_type(ty)
self.handle = handle
self.type = ty
def __str__(self):
return str(self.type)
class VariableValue(Value):
pass
class ConstantValue(Value):
pass
class FunctionValue(Value):
pass
class GotoBlock(object):
def __init__(self):
self.handle = None
self.entries = list()
class Symbol(object):
def __init__(self, name, ty, handle=None):
_assert_is_type(ty)
self.name = name
self.type = ty
self.handle = handle
def __str__(self):
return "%s (%s)" % (self.name, self.type)
class Scope(object):
def __init__(self):
self.symbols = dict()
self.typedefs = dict()
self.gotos = dict()
self.functions = dict()
def dump_symbols(self, prefix=""):
for name in list(self.symbols.keys()):
print(("%s: %s" % (prefix, name)))
sym = self.symbols[name]
if isinstance(sym, Scope):
sym.dump_symbols(prefix + " ")
def dump_functions(self, prefix=""):
for name in list(self.functions.keys()):
print(("%s: %s" % (prefix, name)))
def dump_typedefs(self, prefix=""):
for name in list(self.typedefs.keys()):
print(("%s: %s" % (prefix, name)))
sym = self.typedefs[name]
if isinstance(sym, Scope):
sym.dump_typedefs(prefix + " ")
class SymbolTable(object):
def __init__(self):
self._scopes = list()
self._lvl = -1 # scope level counter
self._lbl = 0 # Next label number
def label(self, s='label'):
self._lbl += 1
return "%s_%d" % (s, self._lbl)
def dump_symbols(self):
print('---------- SYMBOLS --------------')
for i, scope in enumerate(self._scopes):
scope.dump_symbols(" " * i)
def dump_functions(self):
print('--------- FUNCTIONS -------------')
for i, scope in enumerate(self._scopes):
scope.dump_functions(" " * i)
def dump_typedefs(self):
print('--------- TYPEDEFS --------------')
for i, scope in enumerate(self._scopes):
scope.dump_typedefs(" " * i)
def enter_scope(self):
scope = Scope()
self._lvl += 1
self._scopes.append(scope)
def exit_scope(self):
self._scopes.pop(self._lvl)
self._lvl -= 1
@property
def symbols(self):
d = dict()
for i in range(self._lvl + 1):
l = list(d.items())
l += list(self._scopes[i].symbols.items())
d = dict(l)
return d.values()
def install_symbol(self, name, ty, handle=None):
scope = self._scopes[self._lvl]
sym = Symbol(name, ty, handle)
scope.symbols[name] = sym
return sym
def find_symbol(self, name):
for i in range(self._lvl, -1, -1):
scope = self._scopes[i]
if name in scope.symbols:
return scope.symbols[name]
raise SymtabException("Unknown symbol '%s'" % name)
def install_const(self, name, ty, handle):
scope = self._scopes[self._lvl]
const = ConstantValue(handle, ty)
scope.symbols[name] = const
return const
def install_typedef(self, name, ty):
scope = self._scopes[self._lvl]
scope.typedefs[name] = ty
return ty
def find_typedef(self, name):
for i in range(self._lvl, -1, -1):
scope = self._scopes[i]
if name in scope.typedefs:
return scope.typedefs[name]
raise SymtabException("Unknown typedef '%s'" % name)
def install_function(self, name, ty, handle=None):
scope = self._scopes[self._lvl]
func = FunctionValue(handle, ty)
scope.functions[name] = func
return func
def find_function(self, name):
for i in range(self._lvl, -1, -1):
scope = self._scopes[i]
if name in scope.functions:
return scope.functions[name]
raise SymtabException("Unknown function '%s'" % name)
def install_goto(self, name, goto):
scope = self._scopes[self._lvl]
scope.gotos[name] = goto
return goto
def find_goto(self, name):
for i in range(self._lvl, -1, -1):
scope = self._scopes[i]
if name in scope.gotos:
return scope.gotos[name]
raise SymtabException("Unknown goto label '%s'" % name)
| gpl-3.0 | 5,204,515,369,522,514,000 | 20.744292 | 73 | 0.539549 | false |
CingHu/neutron-ustack | neutron/db/agents_db.py | 1 | 11351 | # Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
from oslo.config import cfg
from oslo.db import exception as db_exc
import sqlalchemy as sa
from sqlalchemy.orm import exc
from sqlalchemy import sql
from neutron.common import rpc as n_rpc
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
LOG = logging.getLogger(__name__)
cfg.CONF.register_opt(
cfg.IntOpt('agent_down_time', default=75,
help=_("Seconds to regard the agent is down; should be at "
"least twice report_interval, to be sure the "
"agent is down for good.")))
class Agent(model_base.BASEV2, models_v2.HasId):
"""Represents agents running in neutron deployments."""
__table_args__ = (
sa.UniqueConstraint('agent_type', 'host',
name='uniq_agents0agent_type0host'),
)
# L3 agent, DHCP agent, OVS agent, LinuxBridge
agent_type = sa.Column(sa.String(255), nullable=False)
binary = sa.Column(sa.String(255), nullable=False)
# TOPIC is a fanout exchange topic
topic = sa.Column(sa.String(255), nullable=False)
# TOPIC.host is a target topic
host = sa.Column(sa.String(255), nullable=False)
admin_state_up = sa.Column(sa.Boolean, default=True,
server_default=sql.true(), nullable=False)
# the time when first report came from agents
created_at = sa.Column(sa.DateTime, nullable=False)
# the time when first report came after agents start
started_at = sa.Column(sa.DateTime, nullable=False)
# updated when agents report
heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
# description is note for admin user
description = sa.Column(sa.String(255))
# configurations: a json dict string, I think 4095 is enough
configurations = sa.Column(sa.String(4095), nullable=False)
reserved = sa.Column(sa.Boolean, default=True, nullable=False)
@property
def is_active(self):
return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)
class AgentDbMixin(ext_agent.AgentPluginBase):
"""Mixin class to add agent extension to db_base_plugin_v2."""
def _get_agent(self, context, id):
try:
agent = self._get_by_id(context, Agent, id)
except exc.NoResultFound:
raise ext_agent.AgentNotFound(id=id)
return agent
def get_enabled_agent_on_host(self, context, agent_type, host):
"""Return agent of agent_type for the specified host."""
query = context.session.query(Agent)
query = query.filter(Agent.agent_type == agent_type,
Agent.host == host,
Agent.admin_state_up == sql.true())
try:
agent = query.one()
except exc.NoResultFound:
LOG.debug('No enabled %(agent_type)s agent on host '
'%(host)s' % {'agent_type': agent_type, 'host': host})
return
if self.is_agent_down(agent.heartbeat_timestamp):
LOG.warn(_('%(agent_type)s agent %(agent_id)s is not active')
% {'agent_type': agent_type, 'agent_id': agent.id})
return agent
@classmethod
def is_agent_down(cls, heart_beat_time):
return timeutils.is_older_than(heart_beat_time,
cfg.CONF.agent_down_time)
def get_configuration_dict(self, agent_db):
try:
conf = jsonutils.loads(agent_db.configurations)
except Exception:
msg = _('Configuration for agent %(agent_type)s on host %(host)s'
' is invalid.')
LOG.warn(msg, {'agent_type': agent_db.agent_type,
'host': agent_db.host})
conf = {}
return conf
def _make_agent_dict(self, agent, fields=None):
attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
ext_agent.RESOURCE_NAME + 's')
res = dict((k, agent[k]) for k in attr
if k not in ['alive', 'configurations'])
res['alive'] = not AgentDbMixin.is_agent_down(
res['heartbeat_timestamp'])
res['configurations'] = self.get_configuration_dict(agent)
return self._fields(res, fields)
def delete_agent(self, context, id):
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
context.session.delete(agent)
def update_agent(self, context, id, agent):
agent_data = agent['agent']
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
agent.update(agent_data)
return self._make_agent_dict(agent)
def get_agents_db(self, context, filters=None):
query = self._get_collection_query(context, Agent, filters=filters)
return query.all()
def get_agents(self, context, filters=None, fields=None, sorts=None,
limit=None, marker=None, page_reverse=False):
marker_obj = self._get_marker_obj(context, 'agent', limit, marker)
return self._get_collection(context, Agent,
self._make_agent_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def _get_agent_by_type_and_host(self, context, agent_type, host):
query = self._model_query(context, Agent)
try:
agent_db = query.filter(Agent.agent_type == agent_type,
Agent.host == host).one()
return agent_db
except exc.NoResultFound:
raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
host=host)
except exc.MultipleResultsFound:
raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
host=host)
def _get_agent_by_type(self, context, agent_type):
query = self._model_query(context, Agent)
try:
agent_db = query.filter(Agent.agent_type == agent_type).all()
return agent_db
except exc.NoResultFound:
raise ext_agent.AgentNotFoundByAgentType(agent_type=agent_type)
def get_agent(self, context, id, fields=None):
agent = self._get_agent(context, id)
return self._make_agent_dict(agent, fields)
def _create_or_update_agent(self, context, agent):
with context.session.begin(subtransactions=True):
res_keys = ['agent_type', 'binary', 'host', 'topic']
res = dict((k, agent[k]) for k in res_keys)
configurations_dict = agent.get('configurations', {})
res['configurations'] = jsonutils.dumps(configurations_dict)
current_time = timeutils.utcnow()
try:
agent_db = self._get_agent_by_type_and_host(
context, agent['agent_type'], agent['host'])
res['heartbeat_timestamp'] = current_time
if agent.get('start_flag'):
res['started_at'] = current_time
greenthread.sleep(0)
agent_db.update(res)
except ext_agent.AgentNotFoundByTypeHost:
greenthread.sleep(0)
res['created_at'] = current_time
res['started_at'] = current_time
res['heartbeat_timestamp'] = current_time
res['admin_state_up'] = agent.get('admin_state_up', True)
res['reserved'] = agent.get('reserved', False)
agent_db = Agent(**res)
greenthread.sleep(0)
context.session.add(agent_db)
greenthread.sleep(0)
def create_agent(self, context, agent):
agent = agent['agent']
self.create_or_update_agent(context, agent)
agent_type = agent['agent_type']
host = agent['host']
ag_db = self._get_agent_by_type_and_host(context, agent_type, host)
return self._make_agent_dict(ag_db)
def create_or_update_agent(self, context, agent):
"""Create or update agent according to report."""
try:
return self._create_or_update_agent(context, agent)
except db_exc.DBDuplicateEntry as e:
with excutils.save_and_reraise_exception() as ctxt:
if e.columns == ['agent_type', 'host']:
# It might happen that two or more concurrent transactions
# are trying to insert new rows having the same value of
# (agent_type, host) pair at the same time (if there has
# been no such entry in the table and multiple agent status
# updates are being processed at the moment). In this case
# having a unique constraint on (agent_type, host) columns
# guarantees that only one transaction will succeed and
# insert a new agent entry, others will fail and be rolled
# back. That means we must retry them one more time: no
# INSERTs will be issued, because
# _get_agent_by_type_and_host() will return the existing
# agent entry, which will be updated multiple times
ctxt.reraise = False
return self._create_or_update_agent(context, agent)
class AgentExtRpcCallback(n_rpc.RpcCallback):
"""Processes the rpc report in plugin implementations."""
RPC_API_VERSION = '1.0'
START_TIME = timeutils.utcnow()
def __init__(self, plugin=None):
super(AgentExtRpcCallback, self).__init__()
self.plugin = plugin
def report_state(self, context, **kwargs):
"""Report state from agent to server."""
time = kwargs['time']
time = timeutils.parse_strtime(time)
if self.START_TIME > time:
LOG.debug(_("Message with invalid timestamp received"))
return
agent_state = kwargs['agent_state']['agent_state']
if not self.plugin:
self.plugin = manager.NeutronManager.get_plugin()
self.plugin.create_or_update_agent(context, agent_state)
| apache-2.0 | -816,401,415,570,273,000 | 42.324427 | 79 | 0.594926 | false |
xurble/FeedThing | feedthing/urls.py | 1 | 1666 | from django.urls import path, include
from django.conf.urls import url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from ft.views import *
urlpatterns = [
path('admin/', admin.site.urls),
path('.well-known/<uri>', well_known_uris, name='well_known'),
url(r'^$', index, name="home"),
url(r'^refresh/$', read_request_listener, name="refresh"),
url(r'^help/$', help, name="help"),
url(r'^feeds/$', feeds, name="feeds"),
url(r'^allfeeds/$', allfeeds, name="allfeeds"),
url(r'^addfeed/$', addfeed, name="addfeed"),
url(r'^importopml/$', importopml),
url(r'^feedgarden/$', feedgarden),
url(r'^downloadfeeds/$', downloadfeeds),
path('settings/', user_settings, name='settings'),
path('accounts/', include('django.contrib.auth.urls')),
url(r'^read/(?P<fid>.*)/', readfeed),
url(r'^post/(?P<pid>.*)/save/$',savepost, name="savepost"),
url(r'^post/(?P<pid>.*)/forget/$',forgetpost, name="forgetpost"),
url(r'^saved/$',savedposts, name="savedposts"),
url(r'^manage/$',managefeeds),
url(r'^subscription/list/$',subscriptionlist),
url(r'^subscription/(?P<sid>.*)/unsubscribe/$',unsubscribefeed),
url(r'^subscription/(?P<sid>.*)/details/$',subscriptiondetails),
url(r'^subscription/(?P<sid>.*)/rename/$',subscriptionrename),
url(r'^subscription/(?P<sid>.*)/promote/$',promote),
url(r'^subscription/(?P<sid>.*)/addto/(?P<tid>.*)/$',addto),
url(r'^feed/(?P<fid>.*)/revive/$',revivefeed),
#(r'^feed/(?P<fid>.*)/kill/$',killfeed),
url(r'^feed/(?P<fid>.*)/test/$',testfeed),
]
| mit | 8,063,162,315,167,190,000 | 27.724138 | 69 | 0.609844 | false |
quom/google-cloud-python | dns/google/cloud/dns/client.py | 1 | 4983 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud DNS API."""
from google.cloud.client import JSONClient
from google.cloud.dns.connection import Connection
from google.cloud.dns.zone import ManagedZone
from google.cloud.iterator import HTTPIterator
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: the project which the client acts on behalf of. Will be
passed when creating a zone. If not passed,
falls back to the default inferred from the environment.
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not passed (and if no ``http`` object is
passed), falls back to the default inferred from the
environment.
:type http: :class:`~httplib2.Http`
:param http: (Optional) HTTP object to make requests. Can be any object
that defines ``request()`` with the same interface as
:meth:`~httplib2.Http.request`. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
def __init__(self, project=None, credentials=None, http=None):
super(Client, self).__init__(
project=project, credentials=credentials, http=http)
self._connection = Connection(
credentials=self._credentials, http=self._http)
def quotas(self):
"""Return DNS quotas for the project associated with this client.
See:
https://cloud.google.com/dns/api/v1/projects/get
:rtype: mapping
:returns: keys for the mapping correspond to those of the ``quota``
sub-mapping of the project resource.
"""
path = '/projects/%s' % (self.project,)
resp = self._connection.api_request(method='GET', path=path)
return {key: int(value)
for key, value in resp['quota'].items()
if key != 'kind'}
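# Hypothetical illustration of the shape of the returned mapping (field names
# depend on what the API reports under the project's 'quota' sub-mapping):
#   {'managedZones': 100, 'rrsetsPerManagedZone': 10000, ...}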
def list_zones(self, max_results=None, page_token=None):
"""List zones for the project associated with this client.
See:
https://cloud.google.com/dns/api/v1/managedZones/list
:type max_results: int
:param max_results: maximum number of zones to return, If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of zones. If
not passed, the API will return the first page of
zones.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.dns.zone.ManagedZone`
belonging to this project.
"""
path = '/projects/%s/managedZones' % (self.project,)
return HTTPIterator(
client=self, path=path, item_to_value=_item_to_zone,
items_key='managedZones', page_token=page_token,
max_results=max_results)
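# Minimal usage sketch (assumes the client was built with working credentials):
#   for zone in client.list_zones(max_results=5):
#       print(zone.name)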
def zone(self, name, dns_name=None, description=None):
"""Construct a zone bound to this client.
:type name: str
:param name: Name of the zone.
:type dns_name: str
:param dns_name:
(Optional) DNS name of the zone. If not passed, then calls to
:meth:`zone.create` will fail.
:type description: str
:param description:
(Optional) the description for the zone. If not passed, defaults
to the value of 'dns_name'.
:rtype: :class:`google.cloud.dns.zone.ManagedZone`
:returns: a new ``ManagedZone`` instance.
"""
return ManagedZone(name, dns_name, client=self,
description=description)
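# Sketch of typical use; 'example-zone' and 'example.com.' are made-up values:
#   zone = client.zone('example-zone', dns_name='example.com.')
#   zone.create()  # per the docstring above, create() requires dns_name to be set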
def _item_to_zone(iterator, resource):
"""Convert a JSON managed zone to the native object.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type resource: dict
:param resource: An item to be converted to a managed zone.
:rtype: :class:`.ManagedZone`
:returns: The next managed zone in the page.
"""
return ManagedZone.from_api_repr(resource, iterator.client)
| apache-2.0 | -5,358,123,713,847,823,000 | 37.627907 | 77 | 0.628738 | false |
sh-ft/mudwyrm_users | mudwyrm_users/admin/achaea/scripts/gmcp.py | 1 | 5479 | from mudwyrm_users.admin.achaea import ScriptState
from mudwyrm_users.admin.achaea.common import AttrDict
from mudwyrm_users.admin.achaea.trigger import Trigger, Alias, OnEvent
from mudwyrm_users.admin.achaea.scripts import char
import re
p = None
s = ScriptState()
def init(processor):
assert processor is not None
global p
p = processor
s.last_health = 0
s.last_mana = 0
@OnEvent('LoginPrompt')
def on_login_prompt():
#p.gmcp('Core.Supports.Set ["Char 1", "Char.Skills 1", "Char.Items 1", "Room 1", "IRE.Composer 1", "IRE.Rift 1"]')
#p.gmcp('Char.Login { "name": "name", "password": "password" }')
pass
@OnEvent('LoginSuccess')
def on_login_success():
p.gmcp("Char.Skills.Get")
p.gmcp('Char.Items.Inv')
p.gmcp('IRE.Rift.Request')
@OnEvent('Char.Vitals')
def on_char_vitals(v):
s.last_health = char.health
s.last_mana = char.mana
char.health = int(v['hp'])
char.max_health = int(v['maxhp'])
char.mana = int(v['mp'])
char.max_mana = int(v['maxmp'])
char.endurance = int(v['ep'])
char.max_endurance = int(v['maxep'])
char.willpower = int(v['wp'])
char.max_willpower = int(v['maxwp'])
char.level_experience_percentage = int(v['nl'])
health_change = char.health - s.last_health
mana_change = char.mana - s.last_mana
if health_change != 0 and mana_change != 0:
p.echo("%+d health, %+d mana" % (health_change, mana_change))
elif health_change != 0:
p.echo("%+d health" % health_change)
elif mana_change != 0:
p.echo("%+d mana" % mana_change)
@OnEvent('Room.Info')
def on_room_info(v):
#p.debug("Room.Info %r" % v)
room_updated = char.room.get('id', None) is not None
if 'num' in v:
v['id'] = v['num']
del v['num']
char.room.clear()
char.room.update(v)
if room_updated:
p.raise_event('RoomUpdated')
@OnEvent('Room.WrongDir')
def on_room_wrong_dir(v):
p.raise_event('WrongDir', dir=v)
@OnEvent('Char.StatusVars')
def on_char_status_vars(v):
pass
_org_regex = re.compile(r'^(.*?)\((\d+)\)$')
def _parse_org_info(s):
if s == '(None)':
org, rank = None, None
else:
org, rank = _org_regex.match(s).groups()
rank = int(rank)
return org, rank
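# Illustration (house name is made up): _parse_org_info("Somehouse(3)")
# returns ("Somehouse", 3); the literal string "(None)" yields (None, None).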
@OnEvent('Char.Name')
def on_char_name(v):
char.name = v['name']
char.full_name = v['fullname']
@OnEvent('Char.Status')
def on_char_status(v):
if 'city' in v:
char.city = v['city'][:v['city'].find(' ')]
if 'name' in v:
char.name = v['name']
if 'fullname' in v:
char.full_name = v['fullname']
if 'level' in v:
char.level = int(v['level'])
if 'race' in v:
char.race = v['race']
if 'class' in v:
char.class_ = v['class']
if 'house' in v:
char.house, char.house_rank = _parse_org_info(v['house'])
if 'order' in v:
char.order, char.order_rank = _parse_org_info(v['order'])
if 'xp' in v:
char.level_experience_percentage = float(v['xp'][:-1])
@OnEvent('Char.Skills.Groups')
def on_char_skills_groups(v):
if not hasattr(char, 'skill_groups'):
char.skill_groups = {}
char.skill_groups.clear()
char.skill_groups.update(map(lambda d: map(lambda s: s.lower(), d.values()), v))
for skill_group in char.skill_groups.iterkeys():
p.gmcp('Char.Skills.Get { "group": "%s" }' % skill_group)
@OnEvent('Char.Skills.List')
def on_char_skills_list(v):
if not hasattr(char, 'skills'):
char.skills = {}
char.skills[v['group']] = map(lambda s: s.lower(), v['list'])
@OnEvent('Char.Skills.Info')
def on_char_skills_info(v):
p.debug("Char.Skills.Info %s" % v)
################################################################################
def _item_location(v):
if v['location'] == 'room':
return char.room_objects
elif v['location'] == 'inv':
return char.inv
else: # rep<number> location
container_id = int(v['location'][3:])
if container_id not in char.containers:
char.containers[container_id] = {}
return char.containers[container_id]
@OnEvent('Char.Items.List')
def on_char_items_list(v):
#p.debug("Char.Items.List")
items = _item_location(v)
items.clear()
for item in v['items']:
items[int(item['id'])] = item
@OnEvent('Char.Items.Add')
def on_char_items_add(v):
#p.debug("Char.Items.Add %s" % v)
items = _item_location(v)
items[int(v['item']['id'])] = v['item']
@OnEvent('Char.Items.Update')
def on_char_items_update(v):
#p.debug("Char.Items.Update %s" % v)
items = _item_location(v)
items[int(v['item']['id'])] = v['item']
@OnEvent('Char.Items.Remove')
def on_char_items_remove(v):
#p.debug("Char.Items.Remove %s" % v)
items = _item_location(v)
id = v['item']
if id in items:
del items[id]
def _to_rift_item(v):
v['amount'] = int(v['amount'])
return v
@OnEvent('IRE.Rift.List')
def on_ire_rift_list(v):
char.rift.clear()
for i in v:
item = _to_rift_item(i)
char.rift[item['name']] = item
@OnEvent('IRE.Rift.Change')
def on_ir_rift_change(v):
item = _to_rift_item(v)
name = item['name']
if name not in char.rift:
char.rift[name] = item
else:
char.rift[name].update(item)
if char.rift[name]['amount'] == 0:
del char.rift[name]
| mit | 4,818,264,491,026,690,000 | 27.097436 | 118 | 0.57182 | false |
afmurillo/FlowFence | FlowMonitor_3.py | 1 | 19291 |
""" Module that monitors the average network interface occupation """
import subprocess
from collections import deque
import threading
import application_switch_3
import SwitchProperties
import time
class FlowMonitor_3:
""" Class that monitors network interface occupation """
def __init__(self, samples=10, period=3, interval_time=1.0, upper_limit=10*0.8, lower_limit=10*0.6):
self.n_samples = samples
self.period = period
self.interval_time = interval_time
self.switch_properties = SwitchProperties.SwitchProperties()
self.interfaces_list = self.switch_properties.get_interfaces()
self.complete_interface_list = []
self.old_queue_list = []
self.queues_ids = []
self.qos_register = dict.fromkeys(['uuid','port', 'id', 'min-rate', 'max-rate'] )
self.lock = threading.Lock()
for i in range(len(self.interfaces_list)):
complete_interface_dict = dict.fromkeys(['name', 'dpid', 'capacity', 'lower_limit', 'upper_limit', 'threshold', 'samples', 'use_averages', 'monitoring', 'is_congested', 'queueList'])
complete_interface_dict['name'] = self.interfaces_list[i]['name']
complete_interface_dict['dpid'] = self.interfaces_list[i]['dpid']
complete_interface_dict['capacity'] = self.interfaces_list[i]['capacity']
complete_interface_dict['lower_limit'] = lower_limit
complete_interface_dict['upper_limit'] = upper_limit
complete_interface_dict['threshold'] = upper_limit
complete_interface_dict['samples'] = []
complete_interface_dict['prevEma'] = 0
complete_interface_dict['currentEma'] = 0
complete_interface_dict['use_averages'] = 0
complete_interface_dict['monitoring'] = 0
complete_interface_dict['is_congested'] = 0
complete_interface_dict['queueList'] = []
self.complete_interface_list.append(complete_interface_dict)
for i in range(len(self.complete_interface_list)):
self.complete_interface_list[i]['use_averages'] = deque( maxlen=self.n_samples )
#Control variables
self.threads_id = []
self.init_window()
def reset_queues(self):
""" Clears QoS queues in all interfaces """
for i in range(len(self.complete_interface_list)):
subprocess.check_output('ovs-ofctl del-flows ' + self.complete_interface_list[i]['name'], shell=True)
subprocess.check_output('./clear_queues.sh ', shell=True)
def init_window(self):
""" Inits samples window """
for j in range(len(self.complete_interface_list)):
for i in range(self.n_samples):
self.complete_interface_list[j]['use_averages'].append(0)
for i in range(self.n_samples):
#sample list of dicts, each dict has ['name']['sample']
result = self.get_sample()
for j in range(len(self.complete_interface_list)):
last_samples = result[j]['sample']
self.complete_interface_list[j]['use_averages'].popleft()
self.complete_interface_list[j]['use_averages'].append(last_samples)
if i == 0:
self.complete_interface_list[j]['prevEma'] = last_samples
for j in range(len(self.complete_interface_list)):
for a_bar in enumerate(self.complete_interface_list[j]['use_averages']):
self.complete_interface_list[j]['currentEma'] = self.ema(a_bar, self.complete_interface_list[j]['use_averages'], self.period, self.complete_interface_list[j]['prevEma'], smoothing=None)
self.complete_interface_list[j]['prevEma'] = self.complete_interface_list[j]['currentEma']
def update_window(self):
""" Updates the sample window """
for i in range(self.n_samples):
# Sample list of dicts, each dict has ['name']['sample']
result = self.get_sample() # < ---- GOTTA CHECK THIS
last_samples=0
for j in range(len(self.complete_interface_list)):
last_samples = result[j]['sample']
self.complete_interface_list[j]['use_averages'].popleft()
self.complete_interface_list[j]['use_averages'].append(last_samples)
for j in range(len(self.complete_interface_list)):
if i == 0:
self.complete_interface_list[j]['prevEma'] = last_samples
for a_bar in enumerate(self.complete_interface_list[j]['use_averages']):
self.complete_interface_list[j]['currentEma'] = self.ema(a_bar, self.complete_interface_list[j]['use_averages'], self.period, self.complete_interface_list[j]['prevEma'], smoothing=None)
self.complete_interface_list[j]['prevEma'] = self.complete_interface_list[j]['currentEma']
def start_monitoring(self):
""" Starts the thread that monitors interface occupation """
self.report_object = application_switch_3.ApplicationSwitch()
self.monitoring=1
self.threads_id.append(threading.Thread(name = 'Monitor', target=self.monitor))
self.threads_id[0].start()
def stop_monitoring(self):
""" Stops monitoring the output interface """
self.monitoring=0
#toDo: Handle
def congestion_stopped(self):
""" Unused """
self.is_congested=0
def monitor(self):
""" Obtains a new sample of the interface occupation average, and in case of congestion, notifies the main module """
self.startup_time = time.time()
while True:
if self.monitoring == 1:
try:
self.update_window()
for j in range(len(self.complete_interface_list)):
#print "update, ema: " + str(self.complete_interface_list[j]['currentEma'])
#print "current threshold: " + str(self.complete_interface_list[j]['threshold'])
if (self.complete_interface_list[j]['is_congested'] == 0) and (self.complete_interface_list[j]['currentEma'] >= self.complete_interface_list[j]['threshold']):
#print "Congested"
self.detection_time = time.time()
self.complete_interface_list[j]['threshold'] = self.complete_interface_list[j]['lower_limit']
self.monitoring = 0
self.report_object.congestion_detected(self.complete_interface_list[j])
elif (self.complete_interface_list[j]['is_congested'] == 1) and (self.complete_interface_list[j]['currentEma'] <= self.complete_interface_list[j]['threshold']):
self.complete_interface_list[j]['is_congested'] = 0
self.complete_interface_list[j]['threshold'] = self.complete_interface_list[j]['upper_limit']
#print "Congestion ceased"
self.report_object.congestion_ceased()
except KeyboardInterrupt:
print " \n *** So long and thanks for all the fish! *** "
self.monitoring = 0
break
def clear_queues(self, controller_message):
subprocess.check_output('./clear_queues.sh ', shell=True)
del self.old_queue_list[:]
self.qos_register['uuid'] = None
del self.queues_ids[:]
def update_queues(self, controller_message):
""" Updates the QoS queues, one queue is created for each flow """
# Here we should:
# 0. Check if register QoS exists, if not create it
# 1. Compare the received queue list with the previous one and:
# 1a. If there are new elements, create and attach a queue for them
# 1b. If there are deleted elements, delete the queue
# 1c. If there are elements with different bw, update it
# 2. Store the old queue list as the updated one
# 3. Notify the controller about the queue completion
# Queues are at (controller_message['bw_list'])
self.lock.acquire()
to_create = []
#to_update =[]
to_delete = []
# Check if qos exists
if self.qos_register['uuid'] == None:
self.create_qos_register(self.complete_interface_list[0]['name'])
print "received bw list: ", controller_message['bw_list']
print "old bw list: ", self.old_queue_list
for j in range(len(controller_message['bw_list'])):
# Flow still exists, getting bw/s
for k in range(len(self.old_queue_list)):
if (controller_message['bw_list'][j]['nw_src'] == self.old_queue_list[k]['nw_src']) and (controller_message['bw_list'][j]['nw_dst'] == self.old_queue_list[k]['nw_dst']):
self.set_queue_bw(self.complete_interface_list[0]['name'], k, controller_message['bw_list'][j]['bw'])
break
# If it wasn't in k-1 and k we could have a) flow ceased b) flow is a new one
if (not any(src['nw_src'] == controller_message['bw_list'][j]['nw_src'] for src in self.old_queue_list)):
# New flow does not exist in the old flow stats, append it
#new_flows_indexes.append(j)
print "New queue to create: ", controller_message['bw_list'][j]
to_create.append(controller_message['bw_list'][j])
continue
if not self.old_queue_list:
print "Empty old list!"
to_create.append(controller_message['bw_list'][j])
for j in range(len(self.old_queue_list)):
if (not any(src['nw_src'] == self.old_queue_list[j]['nw_src'] for src in controller_message['bw_list'])):
# New flow does not exist in the old flow stats, append it
print "Old flows to delete: ", self.old_queue_list[j]
to_delete.append(j)
continue
self.create_individual_queues(self.complete_interface_list[0]['name'], to_create)
self.delete_individual_queue(self.complete_interface_list[0]['name'], to_delete)
self.report_object.queues_ready(self.complete_interface_list[0],controller_message['bw_list'], self.old_queue_list)
self.lock.release()
def set_queue_bw(self, interface_name, queue_index, bw):
#ovs-vsctl set Queue e059add5-ea8d-4c05-a9be-895ab217d2b4 other-config:max-rate=99
print "Giving bw of ", bw
command = 'ovs-vsctl set Queue ' + self.old_queue_list[queue_index]['uuid'] +' other-config:max-rate=' + str(bw)
subprocess.check_output(command, shell=True)
self.old_queue_list[queue_index]['bw'] = bw
def delete_a_queue(self, a_queue):
for i in range(len(self.old_queue_list)):
if (self.old_queue_list[i]['nw_src'] == a_queue['nw_src']) and (self.old_queue_list[i]['nw_dst'] == a_queue['nw_dst']):
delete_index = i
break
command = 'ovs-vsctl remove QoS ' + self.qos_register['uuid'] + ' queues ' + str(self.old_queue_list[delete_index]['queueId'])
subprocess.check_output(command, shell=True)
command = 'ovs-vsctl destroy queue ' + str(self.old_queue_list[delete_index]['uuid'])
subprocess.check_output(command, shell=True)
self.queues_ids.remove(self.old_queue_list[delete_index]['queueId'])
del self.old_queue_list[delete_index]
def delete_individual_queue(self, interface_name, to_delete):
for i in range(len(to_delete)):
command = 'ovs-vsctl list Queue ' + '| grep ' + str(self.old_queue_list[to_delete[i]]['uuid'])
result = subprocess.check_output(command, shell=True).split('\n')[0]
print "Grep command result: ", result
if not result:
continue
command = 'ovs-vsctl remove QoS ' + self.qos_register['uuid'] + ' queues ' + str(self.old_queue_list[to_delete[i]]['queueId'])
subprocess.check_output(command, shell=True)
command = 'ovs-vsctl destroy queue ' + str(self.old_queue_list[to_delete[i]]['uuid'])
subprocess.check_output(command, shell=True)
self.queues_ids.remove(self.old_queue_list[to_delete[i]]['queueId'])
removeset = set(to_delete)
newlist = [v for k, v in enumerate(self.old_queue_list) if k not in removeset]
del self.old_queue_list[:]
for j in range(len(newlist)):
self.old_queue_list.append(newlist[j])
def create_individual_queues(self, interface_name, to_create):
#queue_list = []
#print "creating queues: ", to_create
for i in range(len(to_create)):
a_queue_dict = dict.fromkeys(['uuid', 'queueId', 'nw_src', 'nw_dst', 'bw'])
a = 0
while True:
if a not in self.queues_ids:
self.queues_ids.append(a)
break
else:
a = a +1
command = 'ovs-vsctl create Queue other-config:max-rate=' + str(to_create[i]['bw'])
an_uuid = subprocess.check_output(command, shell=True).split('\n')[0]
command = 'ovs-vsctl add Qos ' + self.qos_register['uuid'] + ' queues ' + str(a) + '=' + an_uuid
subprocess.check_output(command, shell=True)
a_queue_dict['uuid'] = an_uuid
a_queue_dict['queueId'] = a
a_queue_dict['nw_src'] = to_create[i]['nw_src']
a_queue_dict['nw_dst'] = to_create[i]['nw_dst']
a_queue_dict['bw'] = to_create[i]['bw']
self.old_queue_list.append(a_queue_dict)
def create_qos_register(self, interface_name):
#ovs-vsctl -- set Port eth0br qos=@fenceqos -- --id=@fenceqos create QoS type=linux-htb
#self.qos_register = dict.fromkeys(['uuid','port', 'id', 'min-rate', 'max-rate'] )
command = 'ovs-vsctl -- set Port ' + interface_name + ' qos=@fenceqos -- --id=@fenceqos create QoS type=linux-htb'
self.qos_register['uuid'] = subprocess.check_output(command, shell=True).split('\n')[0]
self.qos_register['port'] = interface_name
self.qos_register['id'] = 'fenceqos'
self.qos_register['max-rate'] = '900000000'
#ovs-vsctl set Qos 016d2315-6305-4692-ae89-c2a3e680e874 other-config:max-rate=1000000
print "QoS uuid: ", self.qos_register['uuid']
command = 'ovs-vsctl set Qos ' + self.qos_register['uuid'] + ' other-config:max-rate=900000000'
subprocess.check_output(command, shell=True)
def create_queues(self, controller_message):
""" Creates the QoS queues, one queue is created for each flow """
self.clear_queues(controller_message)
self.queues_creation_time = time.time()
self.complete_interface_list[0]['queueList']=self.init_queues(self.complete_interface_list[0]['name'],controller_message['bw_list'])
self.set_queues_bw(self.complete_interface_list[0]['queueList'])
self.report_object.queues_ready(self.complete_interface_list[0],controller_message['bw_list'],self.complete_interface_list[0]['queueList'])
self.queues_complete_time = time.time()
#print "Startup time: " + str(self.startup_time)
#print "Detection time: " + str(self.detection_time)
#print "Queues creation time: " + str(self.queues_creation_time)
#print "Queues complete time: " + str(self.queues_complete_time)
@classmethod
def init_queues(cls, interface_name, bw_list):
""" Inits the QoS queues """
#print "Initing queues for: " + str(interface_name)
queues_list=[]
qos_string='ovs-vsctl -- set Port ' + interface_name + ' qos=@fenceqos -- --id=@fenceqos create qos type=linux-htb other-config:max-rate=900000000'
queues_string=''
for j in range(len(bw_list)):
a_queue_dict=dict.fromkeys(['queueId','queueuuid','nw_src','nw_dst','bw'])
a_queue_dict['queueId']=j
a_queue_dict['nw_src']=bw_list[j]['nw_src']
a_queue_dict['nw_dst']=bw_list[j]['nw_dst']
a_queue_dict['bw'] = bw_list[j]['bw']
a_queue= str(a_queue_dict['queueId']) +'=@queue' + str(a_queue_dict['queueId'])
if j < len(bw_list) - 1:
a_queue = a_queue + ','
queues_string=queues_string+a_queue
queues_list.append(a_queue_dict)
queues_string='queues='+ queues_string
queues_creation=''
for j in range(len(bw_list)):
a_creation='-- --id=@queue' + str(queues_list[j]['queueId']) + ' create Queue other-config:max-rate=100000000 '
queues_creation=queues_creation+a_creation
command=qos_string + ' ' + queues_string + ' ' + queues_creation
#print "Queue command: \n " + str(command)
subprocess.check_output(command, shell=True)
# Getting uuid of each queue
queues_string = subprocess.check_output("ovs-vsctl list Queue", shell=True)
#print "Queues Ready: " + str(queues_string)
allqueues_string = subprocess.check_output("ovs-vsctl list QoS | grep queues", shell=True)
for j in range(len(queues_list)):
queues_list[j]['queueuuid']=allqueues_string.split(":")[1].split(",")[j].split("=")[1].split('}\n')[0].strip()
return queues_list
@classmethod
def set_queues_bw(cls, queues_list):
""" Sets the queue bw, according to the policy defined by the SDN controller """
for i in range(len(queues_list)):
subprocess.check_output("ovs-vsctl set queue " + queues_list[i]['queueuuid'] + " other-config:max-rate="+str(queues_list[i]['bw']), shell=True)
def ema(self, a_bar, series, period, prevma, smoothing=None):
'''Returns the Exponential Moving Average of a series.
Keyword arguments:
a_bar -- currrent index or location of the series
series -- series of values to be averaged
period -- number of values in the series to average
prevma -- previous exponential moving average
smoothing -- smoothing factor to use in the series.
valid values: between 0 & 1.
default: None - which then uses formula = 2.0 / (period + 1.0)
closer to 1 to gives greater weight to recent values - less smooth
closer to 0 gives greater weight to older values -- more smooth
'''
smoothing = 0.8  # note: this hard-coded value overrides the 'smoothing' argument and the docstring's 2.0 / (period + 1.0) default
if a_bar[0] <= 0:
return series[0]
elif a_bar[0] < period:
return self.cumulative_sma(a_bar[0], series, prevma)
return prevma + smoothing * (series[a_bar[0]] - prevma)
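# Worked example with illustrative numbers: once a_bar[0] >= period, the update
# is prevma + smoothing * (sample - prevma); with prevma = 4.0, sample = 6.0 and
# the hard-coded smoothing of 0.8 this gives 4.0 + 0.8 * (6.0 - 4.0) = 5.6.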
@classmethod
def cumulative_sma(cls, a_bar, series, prevma):
"""
Returns the cumulative or unweighted simple moving average.
Avoids averaging the entire series on each call.
Keyword arguments:
a_bar -- current index or location of the value in the series
series -- list or tuple of data to average
prevma -- previous average (n - 1) of the series.
"""
if a_bar[0] <= 0:
return series[0]
else:
return prevma + ((series[a_bar[0]] - prevma) / (a_bar[0] + 1.0))
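# Illustration: if prevma = 4.0 is the average of the first three samples and
# the fourth sample (index 3) is 8.0, the running average becomes
# 4.0 + (8.0 - 4.0) / 4 = 5.0.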
def get_sample(self, interval_time=1.0):
""" Obtains a sample of the interface occupation in bytes/s """
samples_list=[]
for j in range(len(self.complete_interface_list)):
sample_dict = dict.fromkeys(['name', 'sample'])
samples_list.append(sample_dict)
#lists to Store first and second sample value of each interface
# Each value of a and b represents a sample taken in each interface
sample_1 = []
sample_2 = []
for j in range(len(self.complete_interface_list)):
sample_1.append((float(subprocess.check_output("cat /proc/net/dev | grep " + self.complete_interface_list[j]['name'] + " | awk '{print $10;}'", shell=True).split('\n')[0])))
time.sleep(interval_time)
for j in range(len(self.complete_interface_list)):
sample_2.append((float(subprocess.check_output("cat /proc/net/dev | grep " + self.complete_interface_list[j]['name'] + " | awk '{print $10;}'", shell=True).split('\n')[0])))
samples_list[j]['name'] = self.complete_interface_list[j]['name']
#samples_list[j]['sample']=((b[j]-a[j])/1048576) In MBytes
samples_list[j]['sample']=sample_2[j]-sample_1[j]
return samples_list
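# Column $10 of /proc/net/dev (assuming the usual "iface: rx-bytes ..." layout)
# is the transmitted-bytes counter, so each sample is the number of bytes sent
# on that interface during interval_time seconds.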
if __name__ == "__main__":
SOME_SAMPLES = 10
PERIOD = 3 #number of bars to average
AN_INTERVAL_TIME = 1.0
#toDo: Handle this as a percentage of total link capacity
AN_UPPER_LIMIT = 0.4
LOWER_LIMIT = 0.41
USE_AVERAGES = deque( maxlen=SOME_SAMPLES )
CODE = FlowMonitor_3(SOME_SAMPLES, PERIOD, AN_INTERVAL_TIME, AN_UPPER_LIMIT, LOWER_LIMIT)
CODE.start_monitoring()
| gpl-3.0 | -1,708,857,641,622,710,000 | 39.784355 | 243 | 0.650044 | false |
dls-controls/pymalcolm | malcolm/modules/scanning/parts/unrollingpart.py | 1 | 1818 | from annotypes import add_call_types
from scanpointgenerator import CompoundGenerator, SquashingExcluder
from malcolm.core import Part, PartRegistrar
from ..hooks import AAxesToMove, AGenerator, UParameterTweakInfos, ValidateHook
from ..infos import ParameterTweakInfo
class UnrollingPart(Part):
"""Unroll the dimensions described by axesToMove into one long line by
inserting a SquashingExcluder into the generator. This is used for instance
in Odin to unroll a snake scan into a long line so the VDS is performant
"""
# This will be serialized, so maintain camelCase for axesToMove
# noinspection PyPep8Naming
@add_call_types
def on_validate(
self, generator: AGenerator, axesToMove: AAxesToMove
) -> UParameterTweakInfos:
if len(axesToMove) in (0, 1):
# We can't have multiple dimensions here, so this must be ok
return None
# Check that we have a Squashing excluder in the generator which
# contains all the axesToMove
for excluder in generator.excluders:
if isinstance(excluder, SquashingExcluder) and set(excluder.axes) == set(
axesToMove
):
# We have already squashed the axes, so nothing to do
return None
# We need to squash any dimension containing axesToMove down
serialized = dict(generator.to_dict())
serialized["excluders"] = list(serialized["excluders"]) + [
SquashingExcluder(axes=axesToMove)
]
new_generator = CompoundGenerator.from_dict(serialized)
return ParameterTweakInfo("generator", new_generator)
def setup(self, registrar: PartRegistrar) -> None:
super().setup(registrar)
# Hooks
registrar.hook(ValidateHook, self.on_validate)
| apache-2.0 | 385,315,478,255,259,300 | 40.318182 | 85 | 0.683718 | false |
materialsproject/pymatgen | pymatgen/transformations/tests/test_site_transformations.py | 1 | 13772 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import numpy as np
from monty.os.path import which
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure, Molecule
from pymatgen.transformations.site_transformations import (
AddSitePropertyTransformation,
InsertSitesTransformation,
PartialRemoveSitesTransformation,
RemoveSitesTransformation,
ReplaceSiteSpeciesTransformation,
TranslateSitesTransformation,
RadialSiteDistortionTransformation,
)
from pymatgen.util.testing import PymatgenTest
enum_cmd = which("enum.x") or which("multienum.x")
makestr_cmd = which("makestr.x") or which("makeStr.x") or which("makeStr.py")
enumlib_present = enum_cmd and makestr_cmd
class TranslateSitesTransformationTest(PymatgenTest):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([0.5, 0.5, 0.5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-", "O2-", "O2-", "O2-"], coords)
def test_apply_transformation(self):
t = TranslateSitesTransformation([0, 1], [0.1, 0.2, 0.3])
s = t.apply_transformation(self.struct)
self.assertTrue(np.allclose(s[0].frac_coords, [0.1, 0.2, 0.3]))
self.assertTrue(np.allclose(s[1].frac_coords, [0.475, 0.575, 0.675]))
inv_t = t.inverse
s = inv_t.apply_transformation(s)
self.assertAlmostEqual(s[0].distance_and_image_from_frac_coords([0, 0, 0])[0], 0)
self.assertTrue(np.allclose(s[1].frac_coords, [0.375, 0.375, 0.375]))
def test_apply_transformation_site_by_site(self):
t = TranslateSitesTransformation([0, 1], [[0.1, 0.2, 0.3], [-0.075, -0.075, -0.075]])
s = t.apply_transformation(self.struct)
self.assertTrue(np.allclose(s[0].frac_coords, [0.1, 0.2, 0.3]))
self.assertTrue(np.allclose(s[1].frac_coords, [0.3, 0.3, 0.3]))
inv_t = t.inverse
s = inv_t.apply_transformation(s)
self.assertAlmostEqual(s[0].distance_and_image_from_frac_coords([0, 0, 0])[0], 0)
self.assertArrayAlmostEqual(s[1].frac_coords, [0.375, 0.375, 0.375])
def test_to_from_dict(self):
d1 = TranslateSitesTransformation([0], [0.1, 0.2, 0.3]).as_dict()
d2 = TranslateSitesTransformation([0, 1], [[0.1, 0.2, 0.3], [-0.075, -0.075, -0.075]]).as_dict()
t1 = TranslateSitesTransformation.from_dict(d1)
t2 = TranslateSitesTransformation.from_dict(d2)
s1 = t1.apply_transformation(self.struct)
s2 = t2.apply_transformation(self.struct)
self.assertTrue(np.allclose(s1[0].frac_coords, [0.1, 0.2, 0.3]))
self.assertTrue(np.allclose(s2[0].frac_coords, [0.1, 0.2, 0.3]))
self.assertTrue(np.allclose(s2[1].frac_coords, [0.3, 0.3, 0.3]))
str(t1)
str(t2)
class ReplaceSiteSpeciesTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([0.5, 0.5, 0.5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-", "O2-", "O2-", "O2-"], coords)
def test_apply_transformation(self):
t = ReplaceSiteSpeciesTransformation({0: "Na"})
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Na1 Li3 O4")
def test_to_from_dict(self):
d = ReplaceSiteSpeciesTransformation({0: "Na"}).as_dict()
t = ReplaceSiteSpeciesTransformation.from_dict(d)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Na1 Li3 O4")
class RemoveSitesTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([0.5, 0.5, 0.5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-", "O2-", "O2-", "O2-"], coords)
def test_apply_transformation(self):
t = RemoveSitesTransformation(range(2))
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O4")
def test_to_from_dict(self):
d = RemoveSitesTransformation(range(2)).as_dict()
t = RemoveSitesTransformation.from_dict(d)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O4")
class InsertSitesTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([0.5, 0.5, 0.5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-", "O2-", "O2-", "O2-"], coords)
def test_apply_transformation(self):
t = InsertSitesTransformation(["Fe", "Mn"], [[0.0, 0.5, 0], [0.5, 0.2, 0.2]])
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li4 Mn1 Fe1 O4")
t = InsertSitesTransformation(["Fe", "Mn"], [[0.001, 0, 0], [0.1, 0.2, 0.2]])
# Test validate proximity
self.assertRaises(ValueError, t.apply_transformation, self.struct)
def test_to_from_dict(self):
d = InsertSitesTransformation(["Fe", "Mn"], [[0.5, 0, 0], [0.1, 0.5, 0.2]]).as_dict()
t = InsertSitesTransformation.from_dict(d)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li4 Mn1 Fe1 O4")
class PartialRemoveSitesTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([0.5, 0.5, 0.5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-", "O2-", "O2-", "O2-"], coords)
def test_apply_transformation_complete(self):
t = PartialRemoveSitesTransformation(
[tuple(range(4)), tuple(range(4, 8))],
[0.5, 0.5],
PartialRemoveSitesTransformation.ALGO_COMPLETE,
)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
s = t.apply_transformation(self.struct, 12)
self.assertEqual(len(s), 12)
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
def test_apply_transformation_enumerate(self):
t = PartialRemoveSitesTransformation(
[tuple(range(4)), tuple(range(4, 8))],
[0.5, 0.5],
PartialRemoveSitesTransformation.ALGO_ENUMERATE,
)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
s = t.apply_transformation(self.struct, 12)
self.assertEqual(len(s), 12)
def test_apply_transformation_best_first(self):
t = PartialRemoveSitesTransformation(
[tuple(range(4)), tuple(range(4, 8))],
[0.5, 0.5],
PartialRemoveSitesTransformation.ALGO_BEST_FIRST,
)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
def test_apply_transformation_fast(self):
t = PartialRemoveSitesTransformation(
[tuple(range(4)), tuple(range(4, 8))],
[0.5, 0.5],
PartialRemoveSitesTransformation.ALGO_FAST,
)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
t = PartialRemoveSitesTransformation([tuple(range(8))], [0.5], PartialRemoveSitesTransformation.ALGO_FAST)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
def test_to_from_dict(self):
d = PartialRemoveSitesTransformation([tuple(range(4))], [0.5]).as_dict()
t = PartialRemoveSitesTransformation.from_dict(d)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O4")
def test_str(self):
d = PartialRemoveSitesTransformation([tuple(range(4))], [0.5]).as_dict()
self.assertIsNotNone(str(d))
class AddSitePropertyTransformationTest(PymatgenTest):
def test_apply_transformation(self):
s = self.get_structure("Li2O2")
sd = [[True, True, True] for site in s.sites]
bader = np.random.random(s.num_sites).tolist()
site_props = {"selective_dynamics": sd, "bader": bader}
trans = AddSitePropertyTransformation(site_props)
manually_set = s.copy()
for prop, value in site_props.items():
manually_set.add_site_property(prop, value)
trans_set = trans.apply_transformation(s)
for prop in site_props:
self.assertArrayAlmostEqual(trans_set.site_properties[prop], manually_set.site_properties[prop])
class RadialSiteDistortionTransformationTest(PymatgenTest):
def setUp(self):
self.molecule = Molecule(
species=["C", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H"],
coords=[
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[3, 0, 0],
[0, 3, 0],
[0, 0, 3],
[-3, 0, 0],
[0, -3, 0],
[0, 0, -3],
],
)
self.structure = Structure(
species=["C", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H"],
coords=[
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[3, 0, 0],
[0, 3, 0],
[0, 0, 3],
[-3, 0, 0],
[0, -3, 0],
[0, 0, -3],
],
lattice=[[10, 0, 0], [0, 10, 0], [0, 0, 10]],
coords_are_cartesian=True,
)
def test(self):
t = RadialSiteDistortionTransformation(0, 1, nn_only=True)
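        # nearest neighbours of site 0 (originally at distance 1) should each be pushed 1 further away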
s = t.apply_transformation(self.molecule)
self.assertTrue(np.array_equal(s[0].coords, [0, 0, 0]))
self.assertTrue(np.array_equal(s[1].coords, [2, 0, 0]))
self.assertTrue(np.array_equal(s[2].coords, [0, 2, 0]))
self.assertTrue(np.array_equal(s[3].coords, [0, 0, 2]))
self.assertTrue(np.array_equal(s[4].coords, [-2, 0, 0]))
self.assertTrue(np.array_equal(s[5].coords, [0, -2, 0]))
self.assertTrue(np.array_equal(s[6].coords, [0, 0, -2]))
t = RadialSiteDistortionTransformation(0, 1, nn_only=True)
s = t.apply_transformation(self.structure)
for c1, c2 in zip(self.structure[1:7], s[1:7]):
self.assertTrue(c1.distance(c2) == 1.0)
self.assertTrue(np.array_equal(s[0].coords, [0, 0, 0]))
self.assertTrue(np.array_equal(s[1].coords, [2, 0, 0]))
self.assertTrue(np.array_equal(s[2].coords, [0, 2, 0]))
self.assertTrue(np.array_equal(s[3].coords, [0, 0, 2]))
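        # in the periodic 10 Angstrom cell the negative-axis sites wrap around, so -2 maps to 8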
self.assertTrue(np.array_equal(s[4].coords, [8, 0, 0]))
self.assertTrue(np.array_equal(s[5].coords, [0, 8, 0]))
self.assertTrue(np.array_equal(s[6].coords, [0, 0, 8]))
def test_second_nn(self):
t = RadialSiteDistortionTransformation(0, 1, nn_only=False)
s = t.apply_transformation(self.molecule)
for c1, c2 in zip(self.molecule[7:], s[7:]):
self.assertEqual(abs(round(sum(c2.coords - c1.coords), 2)), 0.33)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| mit | 399,329,259,035,847,360 | 38.236467 | 114 | 0.556346 | false |
joehandzik/libstoragemgmt-1 | python_binding/lsm/_data.py | 1 | 36835 | # Copyright (C) 2011-2016 Red Hat, Inc.
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; If not, see <http://www.gnu.org/licenses/>.
#
# Author: tasleson
# Gris Ge <[email protected]>
# Joe Handzik <[email protected]>
from abc import ABCMeta as _ABCMeta
import re
try:
import simplejson as json
except ImportError:
import json
from json.decoder import WHITESPACE
from _common import get_class, default_property, ErrorNumber, LsmError
class DataEncoder(json.JSONEncoder):
"""
    Custom json encoder for objects derived from ILsmData
"""
def default(self, my_class):
if not isinstance(my_class, IData):
raise ValueError('incorrect class type:' + str(type(my_class)))
else:
return my_class._to_dict()
class DataDecoder(json.JSONDecoder):
"""
Custom json decoder for objects derived from ILsmData
"""
@staticmethod
def __process_dict(d):
"""
Processes a dictionary
"""
rc = {}
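        # a dict carrying a 'class' key is a serialized IData object; rebuild
        # it through the factory, otherwise decode the values recursively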
if 'class' in d:
rc = IData._factory(d)
else:
for (k, v) in d.iteritems():
rc[k] = DataDecoder.__decode(v)
return rc
@staticmethod
def __process_list(l):
"""
Processes a list
"""
rc = []
for elem, value in enumerate(l):
if type(value) is list:
rc.append(DataDecoder.__process_list(value))
elif type(value) is dict:
rc.append(DataDecoder.__process_dict(value))
else:
rc.append(value)
return rc
@staticmethod
def __decode(e):
"""
Decodes the parsed json
"""
if type(e) is dict:
return DataDecoder.__process_dict(e)
elif type(e) is list:
return DataDecoder.__process_list(e)
else:
return e
def decode(self, json_string, _w=WHITESPACE.match):
return DataDecoder.__decode(json.loads(json_string))
class IData(object):
"""
Base class functionality of serializable
classes.
"""
__metaclass__ = _ABCMeta
def _to_dict(self):
"""
Represent the class as a dictionary
"""
rc = {'class': self.__class__.__name__}
# If one of the attributes is another IData we will
# process that too, is there a better way to handle this?
for (k, v) in self.__dict__.items():
if isinstance(v, IData):
rc[k[1:]] = v._to_dict()
else:
rc[k[1:]] = v
return rc
@staticmethod
def _factory(d):
"""
Factory for creating the appropriate class given a dictionary.
This only works for objects that inherit from IData
"""
if 'class' in d:
class_name = d['class']
del d['class']
c = get_class(__name__ + '.' + class_name)
# If any of the parameters are themselves an IData process them
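            # (constructor arguments use a leading '_' prefix, so rename the keys to match)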
for k, v in d.items():
if isinstance(v, dict) and 'class' in v:
d['_' + k] = IData._factory(d.pop(k))
else:
d['_' + k] = d.pop(k)
return c(**d)
def __str__(self):
"""
Used for human string representation.
"""
return str(self._to_dict())
@default_property('id', doc="Unique identifier")
@default_property('name', doc="Disk name (aka. vendor)")
@default_property('disk_type', doc="Enumerated type of disk")
@default_property('block_size', doc="Size of each block")
@default_property('num_of_blocks', doc="Total number of blocks")
@default_property('status', doc="Enumerated status")
@default_property('system_id', doc="System identifier")
@default_property("plugin_data", doc="Private plugin data")
class Disk(IData):
"""
Represents a disk.
"""
SUPPORTED_SEARCH_KEYS = ['id', 'system_id']
# We use '-1' to indicate we failed to get the requested number.
    # For example, when the block count is undetectable, we use '-1' instead of
# confusing 0.
BLOCK_COUNT_NOT_FOUND = -1
BLOCK_SIZE_NOT_FOUND = -1
TYPE_UNKNOWN = 0
TYPE_OTHER = 1
    TYPE_ATA = 3  # IDE disk, which is seldom used.
TYPE_SATA = 4
TYPE_SAS = 5
TYPE_FC = 6
TYPE_SOP = 7 # SCSI over PCIe(SSD)
TYPE_SCSI = 8
TYPE_LUN = 9 # Remote LUN was treated as a disk.
    # Due to the complexity of disk types, we are defining these beside DMTF
# standards:
TYPE_NL_SAS = 51 # Near-Line SAS==SATA disk + SAS port.
# in DMTF CIM 2.34.0+ CIM_DiskDrive['DiskType'], they also defined
    # SSD and HYBRID disk types. We use them as a fallback.
TYPE_HDD = 52 # Normal HDD
TYPE_SSD = 53 # Solid State Drive
TYPE_HYBRID = 54 # uses a combination of HDD and SSD
STATUS_UNKNOWN = 1 << 0
STATUS_OK = 1 << 1
STATUS_OTHER = 1 << 2
STATUS_PREDICTIVE_FAILURE = 1 << 3
STATUS_ERROR = 1 << 4
STATUS_REMOVED = 1 << 5
STATUS_STARTING = 1 << 6
STATUS_STOPPING = 1 << 7
STATUS_STOPPED = 1 << 8
STATUS_INITIALIZING = 1 << 9
STATUS_MAINTENANCE_MODE = 1 << 10
    # In maintenance for bad sector scan, integrity check, etc.
# It might be combined with STATUS_OK or
# STATUS_STOPPED for online maintenance or offline maintenance.
STATUS_SPARE_DISK = 1 << 11
# Indicate disk is a spare disk.
STATUS_RECONSTRUCT = 1 << 12
# Indicate disk is reconstructing data.
STATUS_FREE = 1 << 13
# New in version 1.2, indicate the whole disk is not holding any data or
# acting as a dedicate spare disk.
# This disk could be assigned as a dedicated spare disk or used for
# creating pool.
# If any spare disk(like those on NetApp ONTAP) does not require
# any explicit action when assigning to pool, it should be treated as
# free disk and marked as STATUS_FREE|STATUS_SPARE_DISK.
RPM_NO_SUPPORT = -2
RPM_UNKNOWN = -1
RPM_NON_ROTATING_MEDIUM = 0
RPM_ROTATING_UNKNOWN_SPEED = 1
LINK_TYPE_NO_SUPPORT = -2
LINK_TYPE_UNKNOWN = -1
LINK_TYPE_FC = 0
LINK_TYPE_SSA = 2
LINK_TYPE_SBP = 3
LINK_TYPE_SRP = 4
LINK_TYPE_ISCSI = 5
LINK_TYPE_SAS = 6
LINK_TYPE_ADT = 7
LINK_TYPE_ATA = 8
LINK_TYPE_USB = 9
LINK_TYPE_SOP = 10
LINK_TYPE_PCIE = 11
def __init__(self, _id, _name, _disk_type, _block_size, _num_of_blocks,
_status, _system_id, _plugin_data=None, _vpd83='',
_location='', _rpm=RPM_NO_SUPPORT,
_link_type=LINK_TYPE_NO_SUPPORT):
self._id = _id
self._name = _name
self._disk_type = _disk_type
self._block_size = _block_size
self._num_of_blocks = _num_of_blocks
self._status = _status
self._system_id = _system_id
self._plugin_data = _plugin_data
if _vpd83 and not Volume.vpd83_verify(_vpd83):
raise LsmError(ErrorNumber.INVALID_ARGUMENT,
"Incorrect format of VPD 0x83 NAA(3) string: '%s', "
"expecting 32 or 16 lower case hex characters" %
_vpd83)
self._vpd83 = _vpd83
self._location = _location
self._rpm = _rpm
self._link_type = _link_type
@property
def size_bytes(self):
"""
Disk size in bytes.
"""
return self.block_size * self.num_of_blocks
@property
def vpd83(self):
"""
String. SCSI VPD83 ID. New in version 1.3.
Only available for DAS(direct attached storage) systems.
The VPD83 ID could be used in 'lsm.SCSI.disk_paths_of_vpd83()'
when physical disk is exposed to OS directly.
"""
if self._vpd83 == '':
raise LsmError(
ErrorNumber.NO_SUPPORT,
"Disk.vpd83 is not supported by current disk or plugin")
return self._vpd83
@property
def location(self):
"""
String. Disk location in storage topology. New in version 1.3.
"""
if self._location == '':
raise LsmError(ErrorNumber.NO_SUPPORT,
"Disk.location property is not supported by this "
"plugin yet")
return self._location
@property
def rpm(self):
"""
Integer. New in version 1.3.
Disk rotation speed - revolutions per minute(RPM):
-1 (LSM_DISK_RPM_UNKNOWN):
Unknown RPM
0 (LSM_DISK_RPM_NON_ROTATING_MEDIUM):
Non-rotating medium (e.g., SSD)
1 (LSM_DISK_RPM_ROTATING_UNKNOWN_SPEED):
Rotational disk with unknown speed
>1:
Normal rotational disk (e.g., HDD)
"""
if self._rpm == Disk.RPM_NO_SUPPORT:
raise LsmError(ErrorNumber.NO_SUPPORT,
"Disk.rpm is not supported by this plugin yet")
return self._rpm
@property
def link_type(self):
"""
Integer. New in version 1.3.
Link type, possible values are:
lsm.Disk.LINK_TYPE_UNKNOWN
Failed to detect link type
lsm.Disk.LINK_TYPE_FC
Fibre Channel
lsm.Disk.LINK_TYPE_SSA
Serial Storage Architecture, Old IBM tech.
lsm.Disk.LINK_TYPE_SBP
Serial Bus Protocol, used by IEEE 1394.
lsm.Disk.LINK_TYPE_SRP
SCSI RDMA Protocol
lsm.Disk.LINK_TYPE_ISCSI
Internet Small Computer System Interface
lsm.Disk.LINK_TYPE_SAS
Serial Attached SCSI
lsm.Disk.LINK_TYPE_ADT
Automation/Drive Interface Transport
Protocol, often used by Tape.
lsm.Disk.LINK_TYPE_ATA
PATA/IDE or SATA.
lsm.Disk.LINK_TYPE_USB
USB disk.
lsm.Disk.LINK_TYPE_SOP
SCSI over PCI-E
lsm.Disk.LINK_TYPE_PCIE
PCI-E, e.g. NVMe
"""
if self._link_type == Disk.LINK_TYPE_NO_SUPPORT:
raise LsmError(ErrorNumber.NO_SUPPORT,
"Disk.link_type is not supported by this plugin "
"yet")
return self._link_type
def __str__(self):
return self.name
# Let's do this once outside of the class to minimize the number of
# times it needs to be compiled.
_vol_regex_vpd83 = re.compile('(?:^6[0-9a-f]{31})|(?:^[235][0-9a-f]{15})$')
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User given name")
@default_property('vpd83', doc="Vital product page 0x83 identifier")
@default_property('block_size', doc="Volume block size")
@default_property('num_of_blocks', doc="Number of blocks")
@default_property('admin_state', doc="Enabled or disabled by administrator")
@default_property('system_id', doc="System identifier")
@default_property('pool_id', doc="Pool identifier")
@default_property("plugin_data", doc="Private plugin data")
class Volume(IData):
"""
Represents a volume.
"""
SUPPORTED_SEARCH_KEYS = ['id', 'system_id', 'pool_id']
# Replication types
REPLICATE_UNKNOWN = -1
REPLICATE_CLONE = 2
REPLICATE_COPY = 3
REPLICATE_MIRROR_SYNC = 4
REPLICATE_MIRROR_ASYNC = 5
# Provisioning types
PROVISION_UNKNOWN = -1
PROVISION_THIN = 1
PROVISION_FULL = 2
PROVISION_DEFAULT = 3
ADMIN_STATE_DISABLED = 0
ADMIN_STATE_ENABLED = 1
RAID_TYPE_UNKNOWN = -1
# The plugin failed to detect the volume's RAID type.
RAID_TYPE_RAID0 = 0
# Stripe
RAID_TYPE_RAID1 = 1
# Mirror for two disks. For 4 disks or more, they are RAID10.
RAID_TYPE_RAID3 = 3
# Byte-level striping with dedicated parity
RAID_TYPE_RAID4 = 4
# Block-level striping with dedicated parity
RAID_TYPE_RAID5 = 5
# Block-level striping with distributed parity
RAID_TYPE_RAID6 = 6
# Block-level striping with two distributed parities, aka, RAID-DP
RAID_TYPE_RAID10 = 10
# Stripe of mirrors
RAID_TYPE_RAID15 = 15
# Parity of mirrors
RAID_TYPE_RAID16 = 16
# Dual parity of mirrors
RAID_TYPE_RAID50 = 50
# Stripe of parities
RAID_TYPE_RAID60 = 60
# Stripe of dual parities
RAID_TYPE_RAID51 = 51
# Mirror of parities
RAID_TYPE_RAID61 = 61
# Mirror of dual parities
RAID_TYPE_JBOD = 20
# Just bunch of disks, no parity, no striping.
RAID_TYPE_MIXED = 21
# This volume contains multiple RAID settings.
RAID_TYPE_OTHER = 22
# Vendor specific RAID type
STRIP_SIZE_UNKNOWN = 0
DISK_COUNT_UNKNOWN = 0
MIN_IO_SIZE_UNKNOWN = 0
OPT_IO_SIZE_UNKNOWN = 0
VCR_STRIP_SIZE_DEFAULT = 0
WRITE_CACHE_POLICY_UNKNOWN = 1
WRITE_CACHE_POLICY_WRITE_BACK = 2
WRITE_CACHE_POLICY_AUTO = 3
WRITE_CACHE_POLICY_WRITE_THROUGH = 4
WRITE_CACHE_STATUS_UNKNOWN = 1
WRITE_CACHE_STATUS_WRITE_BACK = 2
WRITE_CACHE_STATUS_WRITE_THROUGH = 3
READ_CACHE_POLICY_UNKNOWN = 1
READ_CACHE_POLICY_ENABLED = 2
READ_CACHE_POLICY_DISABLED = 3
READ_CACHE_STATUS_UNKNOWN = 1
READ_CACHE_STATUS_ENABLED = 2
READ_CACHE_STATUS_DISABLED = 3
PHYSICAL_DISK_CACHE_UNKNOWN = 1
PHYSICAL_DISK_CACHE_ENABLED = 2
PHYSICAL_DISK_CACHE_DISABLED = 3
PHYSICAL_DISK_CACHE_USE_DISK_SETTING = 4
def __init__(self, _id, _name, _vpd83, _block_size, _num_of_blocks,
_admin_state, _system_id, _pool_id, _plugin_data=None):
self._id = _id # Identifier
self._name = _name # Human recognisable name
if _vpd83 and not Volume.vpd83_verify(_vpd83):
raise LsmError(ErrorNumber.INVALID_ARGUMENT,
"Incorrect format of VPD 0x83 NAA(3) string: '%s', "
"expecting 32 or 16 lower case hex characters" %
_vpd83)
self._vpd83 = _vpd83 # SCSI page 83 unique ID
self._block_size = _block_size # Block size
self._num_of_blocks = _num_of_blocks # Number of blocks
self._admin_state = _admin_state # enable or disabled by admin
self._system_id = _system_id # System id this volume belongs
self._pool_id = _pool_id # Pool id this volume belongs
self._plugin_data = _plugin_data
@property
def size_bytes(self):
"""
Volume size in bytes.
"""
return self.block_size * self.num_of_blocks
def __str__(self):
return self.name
@staticmethod
def vpd83_verify(vpd):
"""
Returns True if string is valid vpd 0x83 representation
"""
if vpd and _vol_regex_vpd83.match(vpd):
return True
return False
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User defined system name")
@default_property('status', doc="Enumerated status of system")
@default_property('status_info', doc="Detail status information of system")
@default_property("plugin_data", doc="Private plugin data")
class System(IData):
STATUS_UNKNOWN = 1 << 0
STATUS_OK = 1 << 1
STATUS_ERROR = 1 << 2
STATUS_DEGRADED = 1 << 3
STATUS_PREDICTIVE_FAILURE = 1 << 4
STATUS_OTHER = 1 << 5
MODE_NO_SUPPORT = -2
MODE_UNKNOWN = -1
MODE_HARDWARE_RAID = 0
MODE_HBA = 1
READ_CACHE_PCT_NO_SUPPORT = -2
READ_CACHE_PCT_UNKNOWN = -1
def __init__(self, _id, _name, _status, _status_info, _plugin_data=None,
_fw_version='', _mode=None, _read_cache_pct=None):
self._id = _id
self._name = _name
self._status = _status
self._status_info = _status_info
self._plugin_data = _plugin_data
self._fw_version = _fw_version
if _read_cache_pct is None:
self._read_cache_pct = System.READ_CACHE_PCT_NO_SUPPORT
else:
self._read_cache_pct = _read_cache_pct
if _mode is None:
self._mode = System.MODE_NO_SUPPORT
else:
self._mode = _mode
@property
def fw_version(self):
"""
String. Firmware version string. New in version 1.3.
        On some systems, it might contain multiple version strings, for example:
"Package: 23.32.0-0009, FW: 3.440.05-3712"
"""
if self._fw_version == '':
raise LsmError(ErrorNumber.NO_SUPPORT,
"System.fw_version() is not supported by this "
"plugin yet")
return self._fw_version
@property
def mode(self):
"""
Integer(enumerated value). System mode. New in version 1.3.
Only available for HW RAID systems at this time.
Possible values:
* lsm.System.MODE_HARDWARE_RAID
The logical volume(aka, RAIDed virtual disk) can be exposed
to OS while hardware RAID card is handling the RAID
algorithm. Physical disk can not be exposed to OS directly.
* lsm.System.MODE_HBA
The physical disks can be exposed to OS directly.
SCSI enclosure service might be exposed to OS also.
"""
if self._mode == System.MODE_NO_SUPPORT:
raise LsmError(ErrorNumber.NO_SUPPORT,
"System.mode is not supported by this plugin yet")
return self._mode
@property
def read_cache_pct(self):
"""
Integer. Read cache percentage. New in version 1.3.
Possible values:
* 0-100
The read cache percentage. The write cache percentage will
then be 100 - read_cache_pct
"""
if self._read_cache_pct == System.READ_CACHE_PCT_NO_SUPPORT:
raise LsmError(ErrorNumber.NO_SUPPORT,
"System.read_cache_pct is not supported by this "
"plugin yet")
return self._read_cache_pct
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User supplied name")
@default_property('total_space', doc="Total space in bytes")
@default_property('free_space', doc="Free space in bytes")
@default_property('status', doc="Enumerated status")
@default_property('status_info', doc="Text explaining status")
@default_property('system_id', doc="System identifier")
@default_property("plugin_data", doc="Plug-in private data")
@default_property("element_type", doc="What pool can be used for")
@default_property("unsupported_actions",
doc="What cannot be done with this pool")
class Pool(IData):
"""
Pool specific information
"""
SUPPORTED_SEARCH_KEYS = ['id', 'system_id']
TOTAL_SPACE_NOT_FOUND = -1
FREE_SPACE_NOT_FOUND = -1
# Element Type indicate what kind of element could this pool create:
# * Another Pool
# * Volume (aka, LUN)
# * System Reserved Pool.
ELEMENT_TYPE_POOL = 1 << 1
ELEMENT_TYPE_VOLUME = 1 << 2
ELEMENT_TYPE_FS = 1 << 3
ELEMENT_TYPE_DELTA = 1 << 4
ELEMENT_TYPE_VOLUME_FULL = 1 << 5
ELEMENT_TYPE_VOLUME_THIN = 1 << 6
ELEMENT_TYPE_SYS_RESERVED = 1 << 10 # Reserved for system use
# Unsupported actions, what pool cannot be used for
UNSUPPORTED_VOLUME_GROW = 1 << 0
UNSUPPORTED_VOLUME_SHRINK = 1 << 1
# Pool status could be any combination of these status.
STATUS_UNKNOWN = 1 << 0
STATUS_OK = 1 << 1
STATUS_OTHER = 1 << 2
STATUS_DEGRADED = 1 << 4
STATUS_ERROR = 1 << 5
STATUS_STOPPED = 1 << 9
STATUS_RECONSTRUCTING = 1 << 12
STATUS_VERIFYING = 1 << 13
STATUS_INITIALIZING = 1 << 14
STATUS_GROWING = 1 << 15
MEMBER_TYPE_UNKNOWN = 0
MEMBER_TYPE_OTHER = 1
MEMBER_TYPE_DISK = 2
MEMBER_TYPE_POOL = 3
def __init__(self, _id, _name, _element_type, _unsupported_actions,
_total_space, _free_space,
_status, _status_info, _system_id, _plugin_data=None):
self._id = _id # Identifier
self._name = _name # Human recognisable name
self._element_type = _element_type # What pool can be used to create
self._unsupported_actions = _unsupported_actions
# What pool cannot be used for
self._total_space = _total_space # Total size
self._free_space = _free_space # Free space available
self._status = _status # Status of pool.
self._status_info = _status_info # Additional status text of pool
self._system_id = _system_id # System id this pool belongs
self._plugin_data = _plugin_data # Plugin private data
@default_property('id', doc="Unique identifier")
@default_property('name', doc="File system name")
@default_property('total_space', doc="Total space in bytes")
@default_property('free_space', doc="Free space available")
@default_property('pool_id', doc="What pool the file system resides on")
@default_property('system_id', doc="System ID")
@default_property("plugin_data", doc="Private plugin data")
class FileSystem(IData):
SUPPORTED_SEARCH_KEYS = ['id', 'system_id', 'pool_id']
def __init__(self, _id, _name, _total_space, _free_space, _pool_id,
_system_id, _plugin_data=None):
self._id = _id
self._name = _name
self._total_space = _total_space
self._free_space = _free_space
self._pool_id = _pool_id
self._system_id = _system_id
self._plugin_data = _plugin_data
@default_property('id', doc="Unique identifier")
@default_property('name', doc="Snapshot name")
@default_property('ts', doc="Time stamp the snapshot was created")
@default_property("plugin_data", doc="Private plugin data")
class FsSnapshot(IData):
def __init__(self, _id, _name, _ts, _plugin_data=None):
self._id = _id
self._name = _name
self._ts = int(_ts)
self._plugin_data = _plugin_data
@default_property('id', doc="Unique identifier")
@default_property('fs_id', doc="Filesystem that is exported")
@default_property('export_path', doc="Export path")
@default_property('auth', doc="Authentication type")
@default_property('root', doc="List of hosts with no_root_squash")
@default_property('rw', doc="List of hosts with Read & Write privileges")
@default_property('ro', doc="List of hosts with Read only privileges")
@default_property('anonuid', doc="UID for anonymous user id")
@default_property('anongid', doc="GID for anonymous group id")
@default_property('options', doc="String containing advanced options")
@default_property('plugin_data', doc="Plugin private data")
class NfsExport(IData):
SUPPORTED_SEARCH_KEYS = ['id', 'fs_id']
ANON_UID_GID_NA = -1
ANON_UID_GID_ERROR = -2
def __init__(self, _id, _fs_id, _export_path, _auth, _root, _rw, _ro,
_anonuid, _anongid, _options, _plugin_data=None):
assert (_fs_id is not None)
assert (_export_path is not None)
self._id = _id
self._fs_id = _fs_id # File system exported
self._export_path = _export_path # Export path
self._auth = _auth # Authentication type
self._root = _root # List of hosts with no_root_squash
self._rw = _rw # List of hosts with read/write
self._ro = _ro # List of hosts with read/only
self._anonuid = _anonuid # uid for anonymous user id
self._anongid = _anongid # gid for anonymous group id
self._options = _options # NFS options
self._plugin_data = _plugin_data
@default_property('src_block', doc="Source logical block address")
@default_property('dest_block', doc="Destination logical block address")
@default_property('block_count', doc="Number of blocks")
class BlockRange(IData):
def __init__(self, _src_block, _dest_block, _block_count):
self._src_block = _src_block
self._dest_block = _dest_block
self._block_count = _block_count
@default_property('id', doc="Unique instance identifier")
@default_property('name', doc="Access group name")
@default_property('init_ids', doc="List of initiator IDs")
@default_property('init_type', doc="Initiator type")
@default_property('system_id', doc="System identifier")
@default_property('plugin_data', doc="Plugin private data")
class AccessGroup(IData):
SUPPORTED_SEARCH_KEYS = ['id', 'system_id']
INIT_TYPE_UNKNOWN = 0
INIT_TYPE_OTHER = 1
INIT_TYPE_WWPN = 2
INIT_TYPE_ISCSI_IQN = 5
INIT_TYPE_ISCSI_WWPN_MIXED = 7
def __init__(self, _id, _name, _init_ids, _init_type, _system_id,
_plugin_data=None):
self._id = _id
self._name = _name # AccessGroup name
self._init_ids = AccessGroup._standardize_init_list(_init_ids)
# A list of Initiator ID strings.
self._init_type = _init_type
self._system_id = _system_id # System id this group belongs
self._plugin_data = _plugin_data
@staticmethod
def _standardize_init_list(init_ids):
rc = []
for i in init_ids:
valid, init_type, init_id = AccessGroup.initiator_id_verify(i)
if valid:
rc.append(init_id)
else:
                raise LsmError(ErrorNumber.INVALID_ARGUMENT,
"Invalid initiator ID %s" % i)
return rc
_regex_wwpn = re.compile(r"""
^(0x|0X)?([0-9A-Fa-f]{2})
(([\.:\-])?[0-9A-Fa-f]{2}){7}$
""", re.X)
@staticmethod
def initiator_id_verify(init_id, init_type=None, raise_exception=False):
"""
Public method which can be used to verify an initiator id
:param init_id:
:param init_type:
:param raise_exception: Will throw a LsmError INVALID_ARGUMENT if
not a valid initiator address
:return:(Bool, init_type, init_id) Note: init_id will be returned in
normalized format if it's a WWPN
"""
if init_id.startswith('iqn') or init_id.startswith('eui') or\
init_id.startswith('naa'):
if init_type is None or \
init_type == AccessGroup.INIT_TYPE_ISCSI_IQN:
return True, AccessGroup.INIT_TYPE_ISCSI_IQN, init_id
if AccessGroup._regex_wwpn.match(str(init_id)):
if init_type is None or \
init_type == AccessGroup.INIT_TYPE_WWPN:
return (True, AccessGroup.INIT_TYPE_WWPN,
AccessGroup._wwpn_to_lsm_type(init_id))
if raise_exception:
raise LsmError(ErrorNumber.INVALID_ARGUMENT,
"Initiator id '%s' is invalid" % init_id)
return False, None, None
@staticmethod
def _wwpn_to_lsm_type(wwpn, raise_error=True):
"""
        Convert the provided WWPN string into the LSM standard one:
LSM WWPN format:
^(?:[0-9a-f]{2}:){7}[0-9a-f]{2}$
LSM WWPN Example:
10:00:00:00:c9:95:2f:de
Acceptable WWPN format is:
^[0x|0X]{0,1}(:?[0-9A-Fa-f]{2}[\.\-:]{0,1}){7}[0-9A-Fa-f]{2}$
Acceptable WWPN example:
10:00:00:00:c9:95:2f:de
10:00:00:00:C9:95:2F:DE
10-00-00-00-C9-95-2F-DE
10-00-00-00-c9-95-2f-de
10.00.00.00.C9.95.2F.DE
10.00.00.00.c9.95.2f.de
0x10000000c9952fde
0X10000000C9952FDE
10000000c9952fde
10000000C9952FDE
Return the LSM WWPN
Return None if raise_error is False and not a valid WWPN.
"""
if AccessGroup._regex_wwpn.match(str(wwpn)):
s = str(wwpn)
s = s.lower()
s = re.sub(r'0x', '', s)
s = re.sub(r'[^0-9a-f]', '', s)
s = ":".join(re.findall(r'..', s))
return s
if raise_error:
raise LsmError(ErrorNumber.INVALID_ARGUMENT,
"Invalid WWPN Initiator: %s" % wwpn)
return None
@default_property('id', doc="Unique instance identifier")
@default_property('port_type', doc="Target port type")
@default_property('service_address', doc="Target port service address")
@default_property('network_address', doc="Target port network address")
@default_property('physical_address', doc="Target port physical address")
@default_property('physical_name', doc="Target port physical port name")
@default_property('system_id', doc="System identifier")
@default_property('plugin_data', doc="Plugin private data")
class TargetPort(IData):
SUPPORTED_SEARCH_KEYS = ['id', 'system_id']
TYPE_OTHER = 1
TYPE_FC = 2
TYPE_FCOE = 3
TYPE_ISCSI = 4
def __init__(self, _id, _port_type, _service_address,
_network_address, _physical_address, _physical_name,
_system_id, _plugin_data=None):
self._id = _id
self._port_type = _port_type
self._service_address = _service_address
# service_address:
# The address used by upper layer like FC and iSCSI:
# FC and FCoE: WWPN
# iSCSI: IQN
# String. Lower case, split with : every two digits if WWPN.
self._network_address = _network_address
# network_address:
# The address used by network layer like FC and TCP/IP:
# FC/FCoE: WWPN
# iSCSI: IPv4:Port
# [IPv6]:Port
# String. Lower case, split with : every two digits if WWPN.
self._physical_address = _physical_address
# physical_address:
# The address used by physical layer like FC-0 and MAC:
# FC: WWPN
# FCoE: WWPN
# iSCSI: MAC
# String. Lower case, split with : every two digits.
self._physical_name = _physical_name
# physical_name
# The name of physical port. Administrator could use this name to
# locate the port on storage system.
# String.
self._system_id = _system_id
self._plugin_data = _plugin_data
class Capabilities(IData):
UNSUPPORTED = 0
SUPPORTED = 1
_NUM = 512 # Indicate the maximum capability integer
_CAP_NUM_BEGIN = 20 # Indicate the first capability integer
# Block operations
VOLUMES = 20
VOLUME_CREATE = 21
VOLUME_RESIZE = 22
VOLUME_REPLICATE = 23
VOLUME_REPLICATE_CLONE = 24
VOLUME_REPLICATE_COPY = 25
VOLUME_REPLICATE_MIRROR_ASYNC = 26
VOLUME_REPLICATE_MIRROR_SYNC = 27
VOLUME_COPY_RANGE_BLOCK_SIZE = 28
VOLUME_COPY_RANGE = 29
VOLUME_COPY_RANGE_CLONE = 30
VOLUME_COPY_RANGE_COPY = 31
VOLUME_DELETE = 33
VOLUME_ENABLE = 34
VOLUME_DISABLE = 35
VOLUME_MASK = 36
VOLUME_UNMASK = 37
ACCESS_GROUPS = 38
ACCESS_GROUP_CREATE_WWPN = 39
ACCESS_GROUP_DELETE = 40
ACCESS_GROUP_INITIATOR_ADD_WWPN = 41
    # For empty access group, this indicates it can add WWPN into it.
ACCESS_GROUP_INITIATOR_DELETE = 42
VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP = 43
ACCESS_GROUPS_GRANTED_TO_VOLUME = 44
VOLUME_CHILD_DEPENDENCY = 45
VOLUME_CHILD_DEPENDENCY_RM = 46
ACCESS_GROUP_CREATE_ISCSI_IQN = 47
ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN = 48
    # For empty access group, this indicates it can add iSCSI IQN into it.
VOLUME_ISCSI_CHAP_AUTHENTICATION = 53
VOLUME_RAID_INFO = 54
VOLUME_THIN = 55
BATTERIES = 56
VOLUME_CACHE_INFO = 57
VOLUME_PHYSICAL_DISK_CACHE_UPDATE = 58
VOLUME_PHYSICAL_DISK_CACHE_UPDATE_SYSTEM_LEVEL = 59
VOLUME_WRITE_CACHE_POLICY_UPDATE_WRITE_BACK = 60
VOLUME_WRITE_CACHE_POLICY_UPDATE_AUTO = 61
VOLUME_WRITE_CACHE_POLICY_UPDATE_WRITE_THROUGH = 62
VOLUME_WRITE_CACHE_POLICY_UPDATE_IMPACT_READ = 63
VOLUME_WRITE_CACHE_POLICY_UPDATE_WB_IMPACT_OTHER = 64
VOLUME_READ_CACHE_POLICY_UPDATE = 65
VOLUME_READ_CACHE_POLICY_UPDATE_IMPACT_WRITE = 66
# File system
FS = 100
FS_DELETE = 101
FS_RESIZE = 102
FS_CREATE = 103
FS_CLONE = 104
FILE_CLONE = 105
FS_SNAPSHOTS = 106
FS_SNAPSHOT_CREATE = 107
FS_SNAPSHOT_DELETE = 109
FS_SNAPSHOT_RESTORE = 110
FS_SNAPSHOT_RESTORE_SPECIFIC_FILES = 111
FS_CHILD_DEPENDENCY = 112
FS_CHILD_DEPENDENCY_RM = 113
FS_CHILD_DEPENDENCY_RM_SPECIFIC_FILES = 114
# NFS
EXPORT_AUTH = 120
EXPORTS = 121
EXPORT_FS = 122
EXPORT_REMOVE = 123
EXPORT_CUSTOM_PATH = 124
SYS_READ_CACHE_PCT_UPDATE = 158
SYS_READ_CACHE_PCT_GET = 159
SYS_FW_VERSION_GET = 160
SYS_MODE_GET = 161
DISK_LOCATION = 163
DISK_RPM = 164
DISK_LINK_TYPE = 165
VOLUME_LED = 171
POOLS_QUICK_SEARCH = 210
VOLUMES_QUICK_SEARCH = 211
DISKS_QUICK_SEARCH = 212
ACCESS_GROUPS_QUICK_SEARCH = 213
FS_QUICK_SEARCH = 214
NFS_EXPORTS_QUICK_SEARCH = 215
TARGET_PORTS = 216
TARGET_PORTS_QUICK_SEARCH = 217
DISKS = 220
POOL_MEMBER_INFO = 221
VOLUME_RAID_CREATE = 222
DISK_VPD83_GET = 223
def _to_dict(self):
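        # serialize the capability byte array as a lower-case hex string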
return {'class': self.__class__.__name__,
'cap': ''.join(['%02x' % b for b in self._cap])}
def __init__(self, _cap=None):
if _cap is not None:
self._cap = bytearray(_cap.decode('hex'))
else:
self._cap = bytearray(Capabilities._NUM)
def supported(self, capability):
return self.get(capability) == Capabilities.SUPPORTED
def get(self, capability):
if capability >= len(self._cap):
return Capabilities.UNSUPPORTED
return self._cap[capability]
@staticmethod
def _lsm_cap_to_str_dict():
"""
Return a dict containing all valid capability:
integer => string name
"""
lsm_cap_to_str_conv = dict()
for c_str, c_int in Capabilities.__dict__.items():
if type(c_str) == str and type(c_int) == int and \
c_str[0] != '_' and \
Capabilities._CAP_NUM_BEGIN <= c_int <= Capabilities._NUM:
lsm_cap_to_str_conv[c_int] = c_str
return lsm_cap_to_str_conv
def get_supported(self, all_cap=False):
"""
Returns a hash of the supported capabilities in the form
constant, name
"""
all_caps = Capabilities._lsm_cap_to_str_dict()
if all_cap:
return all_caps
rc = {}
for i in all_caps.keys():
if self._cap[i] == Capabilities.SUPPORTED:
if i in all_caps:
rc[i] = all_caps[i]
return rc
def set(self, capability, value=SUPPORTED):
self._cap[capability] = value
def enable_all(self):
for i in range(len(self._cap)):
self._cap[i] = Capabilities.SUPPORTED
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User given name")
@default_property('type', doc="Cache hardware type")
@default_property('status', doc='Battery status')
@default_property('system_id', doc="System identifier")
@default_property("plugin_data", doc="Private plugin data")
class Battery(IData):
SUPPORTED_SEARCH_KEYS = ['id', 'system_id']
TYPE_UNKNOWN = 1
TYPE_OTHER = 2
TYPE_CHEMICAL = 3
TYPE_CAPACITOR = 4
STATUS_UNKNOWN = 1 << 0
STATUS_OTHER = 1 << 1
STATUS_OK = 1 << 2
STATUS_DISCHARGING = 1 << 3
STATUS_CHARGING = 1 << 4
STATUS_LEARNING = 1 << 5
STATUS_DEGRADED = 1 << 6
STATUS_ERROR = 1 << 7
def __init__(self, _id, _name, _type, _status, _system_id,
_plugin_data=None):
self._id = _id
self._name = _name
self._type = _type
self._status = _status
self._system_id = _system_id
self._plugin_data = _plugin_data
if __name__ == '__main__':
# TODO Need some unit tests that encode/decode all the types with nested
pass
| lgpl-2.1 | 2,991,822,636,371,551,000 | 33.012004 | 79 | 0.588272 | false |
mattliston/sentiment | train_rel.py | 1 | 5406 | # THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python train_rel.py
from six.moves import cPickle
import numpy as np
import theano
import theano.tensor as T
import lasagne as L
import random
import time
import string
import argparse
# parse command line arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lr', help='learning rate', default=0.0001, type=float)
parser.add_argument('--epochs', help='number of epochs', default=300, type=int)
parser.add_argument('--batch', help='batch size', default=10, type=int)
parser.add_argument('--debug', default=False, action='store_true')
args = parser.parse_args()
print args
d=open('data.pickle','rb') #load data and labels
data=np.asarray(cPickle.load(d))
data=data.astype(np.float32)
d.close()
l=open('label.pickle','rb')
label=np.asarray(cPickle.load(l))
label=label.astype(np.int32)
l.close()
rng_state = np.random.get_state() #shuffle data and labels
np.random.shuffle(data)
np.random.set_state(rng_state)
np.random.shuffle(label)
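# remap the -1 (negative) label to class index 2 so every label is a valid class id (0, 1, 2) for the 3-way softmax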
for i in range(0,label.size):
if label[i]==-1:
label[i]=2
dist=np.zeros(3)
for i in range(0,label.size):
if label[i]==2: #this is negative change
dist[0]+=1
if label[i]==0:
dist[1]+=1
if label[i]==1:
dist[2]+=1
print 'dist', dist
train_split=0.8 #break into train and test sets
train_data=data[0:int(train_split*data.shape[0])]
train_label=label[0:int(train_split*label.shape[0])]
test_data=data[int(train_split*data.shape[0]):data.shape[0]]
test_label=label[int(train_split*label.shape[0]):label.shape[0]]
train_dist=np.zeros(3)
for i in range(0,train_label.shape[0]):
if train_label[i]==2:
train_dist[0]+=1
if train_label[i]==0:
train_dist[1]+=1
if train_label[i]==1:
train_dist[2]+=1
test_dist=np.zeros(3)
for i in range(0,test_label.shape[0]):
if test_label[i]==2:
test_dist[0]+=1
if test_label[i]==0:
test_dist[1]+=1
if test_label[i]==1:
test_dist[2]+=1
#n_loss = 0
#t_loss = 0
#t_norm = 0
#batch_size=10
#for i in range(0,train_data.shape[0]-batch_size,batch_size):
# reshape=np.reshape(tuple(train_data[i:i+batch_size],train_data[i:i+batch_size].shape)
# input_win=[train_data[i:i+batch_size],train_label[i:i+batch_size]]
# print len(input_win)
# break
#exit(0)
input_var = T.tensor3(dtype=theano.config.floatX) #define network
target_var = T.vector(dtype='int32')
network = L.layers.InputLayer((None,1,99),input_var)
network = L.layers.Conv1DLayer(network,num_filters=75,filter_size=10,stride=9)
network = L.layers.Conv1DLayer(network,num_filters=75,filter_size=10,stride=1)
#network = L.layers.MaxPool1DLayer(network,pool_size=10)
network = L.layers.DenseLayer(network, 50)
network = L.layers.DenseLayer(network, 25)
network = L.layers.DenseLayer(network, 12)
#network = L.layers.DenseLayer(network, 11)
network = L.layers.DenseLayer(network, 3, nonlinearity=L.nonlinearities.softmax)
print L.layers.get_output_shape(network)
prediction = L.layers.get_output(network)
loss = L.objectives.aggregate(L.objectives.categorical_crossentropy(prediction, target_var), mode='mean')
params = L.layers.get_all_params(network, trainable=True)
updates = L.updates.adam(loss, params, learning_rate=args.lr)
scaled_grads,norm = L.updates.total_norm_constraint(T.grad(loss,params), np.inf, return_norm=True)
train_fn = theano.function([input_var, target_var], [loss,norm], updates=updates)
test_fn = theano.function([input_var], L.layers.get_output(network, deterministic=True))
for epoch in range(0,args.epochs):
rng_state = np.random.get_state()
np.random.shuffle(train_data)
np.random.set_state(rng_state)
np.random.shuffle(train_label)
n_loss = 0
t_loss = 0
t_norm = 0
batch_size=args.batch
for i in range(0,train_data.shape[0]-batch_size,batch_size):
reshape=np.reshape(train_data[i:i+batch_size],(batch_size,1,99))
# print i,reshape.shape
# reshape=np.reshape(tuple(train_data[i:i+batch_size],train_data[i:i+batch_size].shape)
b_loss,b_norm = train_fn(reshape,train_label[i:i+batch_size])
t_loss += b_loss
t_norm += b_norm
n_loss += 1.0
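    # evaluate accuracy and the prediction histogram on the held-out test set after each epoch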
correct=0
total=0
hist=np.zeros(3)
for i in range(0,test_data.shape[0]):
# print test_data.shape[0]
reshape=np.reshape(test_data[i],(1,1,99))
val_output = test_fn(reshape)
val_predictions = np.argmax(val_output[0])
# print 'val_predictions', val_predictions
if val_predictions==2:
hist[0]+=1
if val_predictions==0:
hist[1]+=1
if val_predictions==1:
hist[2]+=1
# hist[val_predictions]+=1
# print val_predictions
# print test_label[i]
# exit(0)
if val_predictions==test_label[i]:
correct+=1
total+=1
else:
total+=1
tacc=float(correct)/float(total)
print 'epoch', epoch, 't_loss', t_loss/n_loss, 't_norm', t_norm/n_loss, 'tacc', tacc, 'hist', hist,'train dist',train_dist, 'testdist', test_dist
f = open('model.pickle', 'wb')
cPickle.dump([train_data,train_label,test_data,test_label,L.layers.get_all_param_values(network)], f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
#for i in range(0,len(data)):
# print data[i], label[i]
#
#print data.shape, label.shape
| mit | -5,686,277,558,314,880,000 | 33.21519 | 149 | 0.671846 | false |
google-research/google-research | correct_batch_effects_wdn/distance.py | 1 | 4016 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Compute various distance metrics for probability densities."""
import numpy as np
import pandas as pd
import sklearn.metrics.pairwise
def _combine(v1, v2):
"""Combine a vector and a vector or array into a single vector."""
return np.concatenate((v1, v2.reshape(-1)))
def _split(v, col1, col2):
"""Split a vector into a vector + a vector or array.
The first vector is 1D with col1 columns. The second has col2 columns and
is a 1-D vector if len(v) == col1 + col2 or an array otherwise.
Args:
v: vector to split
col1: number of columns for the first portion
col2: number of columns for the second portion
Returns:
A tuple consisting of the first split vector and the second.
"""
v1 = v[:col1]
v2 = v[col1:]
if len(v2) == col2:
return v1, v2
return v1, v2.reshape([-1, col2])
def _wrapped_dist_fn(v1, v2, dist_fn=None, dfcol=None, auxcol=None):
"""Wrapper for a distance function that splits the inputs.
This allows us to use distances that require auxiliary quantities with
  sklearn's pairwise_distances function.
Args:
v1: first input vector - will be split
v2: second input vector - will be split
dist_fn: distance function to call on split vectors
dfcol: number of columns for the first split portion
auxcol: number of columns for the second split portion
Returns:
The value of dist_fn called on the split versions of v1 and v2.
"""
v11, v12 = _split(v1, dfcol, auxcol)
v21, v22 = _split(v2, dfcol, auxcol)
return dist_fn(v11, v21, v12, v22)
def matrix(dist_fn, df, aux_df=None, n_jobs=1, **kwds):
"""Compute a distance matrix between rows of a DataFrame.
Args:
dist_fn: A distance function. If aux_df = None, should take 2 Series
as arguments; if aux_df is a data frame, should take 4 Series as
arguments (row1, row2, aux1, aux2).
df: DataFrame for which we want to compute row distances
aux_df: optional auxiliary DataFrame whose rows provide additional
distance function arguments
n_jobs: number of parallel jobs to use in computing the distance matrix.
Note that setting n_jobs > 1 does not work well in Colab.
**kwds: additional keyword arguments are passed to sklearn's
pairwise_distances function
Returns:
A matrix of distances.
"""
dfrow, dfcol = df.shape
if aux_df is not None:
auxrow, auxcol = aux_df.shape
# aux_df specifies either a set of vectors of variances or arrays of
# covariances for use with the distance functions below. sklearn's
# pairwise distance function doesn't allow for this kind of side info,
# so we need to flatten the side information and append it to the vectors
# in df, then we need to wrap the distance functions so the side info is
# split out before computing distances.
if aux_df is not None:
combined = np.zeros([dfrow, dfcol + int(auxrow / dfrow) * auxcol])
for i, (idx, row) in enumerate(df.iterrows()):
combined[i, :] = _combine(row.values, aux_df.loc[idx].values)
kwds.update(dist_fn=dist_fn, dfcol=dfcol, auxcol=auxcol)
dist = sklearn.metrics.pairwise.pairwise_distances(
X=combined, metric=_wrapped_dist_fn, n_jobs=n_jobs, **kwds)
else:
dist = sklearn.metrics.pairwise.pairwise_distances(
X=df.values, metric=dist_fn, n_jobs=n_jobs, **kwds)
return pd.DataFrame(dist, columns=df.index, index=df.index)
| apache-2.0 | -8,645,224,372,105,230,000 | 35.509091 | 76 | 0.708167 | false |
jonemo/microscan-driver | setup.py | 1 | 1439 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='microscan',
version='0.0.2',
packages=find_packages(where='src'),
package_dir={"": "src"},
install_requires=['pyserial'],
entry_points={
'console_scripts': [
'microscan_server=microscan.tools.server:main',
],
},
    description='Python driver for Microscan barcode readers',
long_description=long_description,
url='https://github.com/jonemo/microscan-driver',
author='Jonas Neubert',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Hardware :: Hardware Drivers',
],
)
| mit | 1,431,536,182,747,373,000 | 30.977778 | 65 | 0.62335 | false |
celliern/triflow | triflow/core/routines.py | 1 | 3124 | #!/usr/bin/env python
# coding=utf8
import numpy as np
import sympy as sp
class ModelRoutine:
def __init__(self, matrix, args, pars, ufunc,
reduced=False):
self.pars = list(pars) + ['periodic']
self.matrix = matrix
self.args = args
self._ufunc = ufunc
def __repr__(self):
return sp.Matrix(self.matrix.tolist()).__repr__()
class F_Routine(ModelRoutine):
"""Compute the right hand side of the dynamical system
:math:`\\frac{\\partial U}{\\partial t} = F(U)`
Parameters
----------
fields : triflow.Fields
triflow fields container generated by a triflow.Model containing the actual state of the dependent variables and helper functions.
pars : dict
dictionnary with the different physical parameters of the model and the 'periodic' key.
Returns
-------
numpy.ndarray
flat array containing the right hand side of the dynamical system.
""" # noqa
def __call__(self, fields, pars):
uargs = [fields['x'].values, *[fields[key].values
for key in self.args]]
pargs = [pars[key] + fields["x"].values * 0
if key != 'periodic' else pars[key]
for key
in self.pars]
F = self._ufunc(*uargs, *pargs)
return F
def diff_approx(self, fields, pars, eps=1E-3):
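        # finite-difference approximation of the Jacobian: perturb each component of the flattened state by eps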
fpars = {key: pars[key] for key in self.pars}
fpars['dx'] = (fields['x'][-1] - fields['x'][0]) / fields['x'].size
U = fields.uflat
J = np.zeros((U.size, U.size))
F = self(fields, pars)
for i, u in enumerate(U):
fields_plus = fields.copy()
Up = fields_plus.uflat
Up[i] += eps
fields_plus.fill(Up)
Fplus = self(fields_plus, pars)
J[i] = (Fplus - F) / (eps)
return J.T
class J_Routine(ModelRoutine):
"""Compute the right hand side of the dynamical system
:math:`\\frac{\\partial U}{\\partial t} = F(U)`
Parameters
----------
fields : triflow.Fields
triflow fields container generated by a triflow.Model containing the actual state of the dependent variables and helper functions.
pars : dict
dictionnary with the different physical parameters of the model and the 'periodic' key.
sparse : bool, optional, default True
whether should the matrix returned as dense or sparse form.
Returns
-------
scipy.sparse.CSC or numpy.ndarray: sparse or dense form (depending of the `sparse` argument) of the Jacobian approximation of the dynamical system right hand side.
""" # noqa
def __call__(self, fields, pars, sparse=True):
uargs = [fields['x'].values,
*[fields[key].values for key in self.args]]
pargs = [(pars[key] + fields['x'] * 0).values
if key != 'periodic' else pars[key]
for key
in self.pars]
J = self._ufunc(*uargs, *pargs)
return J if sparse else J.todense()
| gpl-3.0 | -7,043,435,094,576,743,000 | 33.32967 | 169 | 0.566261 | false |
Kami/libcloud | libcloud/compute/constants.py | 1 | 371764 | # File generated by script - DO NOT EDIT manually
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INSTANCE_TYPES = {
"a1.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "a1.2xlarge",
"name": "a1.2xlarge",
"ram": 16384
},
"a1.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.4xlarge",
"instancesku": "JQK5AH3Y44CQ57AA",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "a1.4xlarge",
"name": "a1.4xlarge",
"ram": 32768
},
"a1.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.large",
"instancesku": "FV7PUC9Y973899CS",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "a1.large",
"name": "a1.large",
"ram": 4096
},
"a1.medium": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.medium",
"instancesku": "Q494P4JCZC9KDPCA",
"memory": "2 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1"
},
"id": "a1.medium",
"name": "a1.medium",
"ram": 2048
},
"a1.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "a1.xlarge",
"instancesku": "943GQ4TSWGS354CV",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "a1.xlarge",
"name": "a1.xlarge",
"ram": 8192
},
"c1.medium": {
"bandwidth": None,
"disk": 350,
"extra": {
"currentGeneration": "No",
"ecu": "5",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Compute optimized",
"instanceType": "c1.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "1.7 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 350 SSD",
"vcpu": "2"
},
"id": "c1.medium",
"name": "c1.medium",
"ram": 1740
},
"c1.xlarge": {
"bandwidth": None,
"disk": 1680,
"extra": {
"currentGeneration": "No",
"ecu": "20",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Compute optimized",
"instanceType": "c1.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "7 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 420 SSD",
"vcpu": "8"
},
"id": "c1.xlarge",
"name": "c1.xlarge",
"ram": 7168
},
"c3.2xlarge": {
"bandwidth": None,
"disk": 160,
"extra": {
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "28",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "15 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 80 SSD",
"vcpu": "8"
},
"id": "c3.2xlarge",
"name": "c3.2xlarge",
"ram": 15360
},
"c3.4xlarge": {
"bandwidth": None,
"disk": 320,
"extra": {
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "55",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.4xlarge",
"instancesku": "BPC44EBCFMK4UP3U",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "30 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 160 SSD",
"vcpu": "16"
},
"id": "c3.4xlarge",
"name": "c3.4xlarge",
"ram": 30720
},
"c3.8xlarge": {
"bandwidth": 10,
"disk": 640,
"extra": {
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "108",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "60 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 320 SSD",
"vcpu": "36"
},
"id": "c3.8xlarge",
"name": "c3.8xlarge",
"ram": 61440
},
"c3.large": {
"bandwidth": None,
"disk": 32,
"extra": {
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "7",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "3.75 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 16 SSD",
"vcpu": "2"
},
"id": "c3.large",
"name": "c3.large",
"ram": 3840
},
"c3.xlarge": {
"bandwidth": None,
"disk": 80,
"extra": {
"clockSpeed": "2.8 GHz",
"currentGeneration": "No",
"ecu": "14",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c3.xlarge",
"instancesku": "37H9CQX49KK8KQ2D",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "7.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2680 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 40 SSD",
"vcpu": "4"
},
"id": "c3.xlarge",
"name": "c3.xlarge",
"ram": 7680
},
"c4.2xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "15 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "c4.2xlarge",
"name": "c4.2xlarge",
"ram": 15360
},
"c4.4xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2000 Mbps",
"ecu": "62",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.4xlarge",
"instancesku": "DKZ8SVQYPP9QWN2F",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "30 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "c4.4xlarge",
"name": "c4.4xlarge",
"ram": 30720
},
"c4.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4000 Mbps",
"ecu": "132",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.8xlarge",
"instancesku": "AKHXNUBZUVXUQVS4",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "60 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "36"
},
"id": "c4.8xlarge",
"name": "c4.8xlarge",
"ram": 61440
},
"c4.large": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "500 Mbps",
"ecu": "8",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.large",
"instancesku": "NJYJAMFEMWY3BC2Y",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "3.75 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "c4.large",
"name": "c4.large",
"ram": 3840
},
"c4.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.9 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c4.xlarge",
"instancesku": "KT5H34MYNAZWD4VM",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "7.5 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2666 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "c4.xlarge",
"name": "c4.xlarge",
"ram": 7680
},
"c5.12xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "188",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.12xlarge",
"instancesku": "G88BQ9JNT44CJU5A",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "96 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Deep Learning Boost; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48"
},
"id": "c5.12xlarge",
"name": "c5.12xlarge",
"ram": 98304
},
"c5.18xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "281",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.18xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "144 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "144",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "72"
},
"id": "c5.18xlarge",
"name": "c5.18xlarge",
"ram": 147456
},
"c5.24xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "375",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.24xlarge",
"instancesku": "DFU3RR7RPDU5NHP6",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "192 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Deep Learning Boost; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96"
},
"id": "c5.24xlarge",
"name": "c5.24xlarge",
"ram": 196608
},
"c5.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "39",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.2xlarge",
"instancesku": "DFH4W5NFFA5EGBVU",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "c5.2xlarge",
"name": "c5.2xlarge",
"ram": 16384
},
"c5.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "73",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.4xlarge",
"instancesku": "ZJCH42UY78N76ZSX",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "c5.4xlarge",
"name": "c5.4xlarge",
"ram": 32768
},
"c5.9xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "139",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.9xlarge",
"instancesku": "8UXUVRFUWYET2CA4",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "72 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "72",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "36"
},
"id": "c5.9xlarge",
"name": "c5.9xlarge",
"ram": 73728
},
"c5.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.large",
"instancesku": "ZGCJFX4FNDKBY4B4",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "c5.large",
"name": "c5.large",
"ram": 4096
},
"c5.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "20",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "c5.xlarge",
"name": "c5.xlarge",
"ram": 8192
},
"c5d.12xlarge": {
"bandwidth": 12,
"disk": 1800,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "188",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.12xlarge",
"instancesku": "65HKXNPPQZVPW4JB",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "96 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Deep Learning Boost; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48"
},
"id": "c5d.12xlarge",
"name": "c5d.12xlarge",
"ram": 98304
},
"c5d.18xlarge": {
"bandwidth": 25,
"disk": 1800,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "281",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.18xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "144 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "144",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "72"
},
"id": "c5d.18xlarge",
"name": "c5d.18xlarge",
"ram": 147456
},
"c5d.24xlarge": {
"bandwidth": 25,
"disk": 3600,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "375",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.24xlarge",
"instancesku": "PC4G8JDRGDDX5R8B",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "192 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8275L",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Deep Learning Boost; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96"
},
"id": "c5d.24xlarge",
"name": "c5d.24xlarge",
"ram": 196608
},
"c5d.2xlarge": {
"bandwidth": 10,
"disk": 200,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "39",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 200 NVMe SSD",
"vcpu": "8"
},
"id": "c5d.2xlarge",
"name": "c5d.2xlarge",
"ram": 16384
},
"c5d.4xlarge": {
"bandwidth": 10,
"disk": 400,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "73",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 400 NVMe SSD",
"vcpu": "16"
},
"id": "c5d.4xlarge",
"name": "c5d.4xlarge",
"ram": 32768
},
"c5d.9xlarge": {
"bandwidth": 10,
"disk": 900,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "139",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.9xlarge",
"instancesku": "6PPHHXHGC4KS4FD2",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "72 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "72",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 900 NVMe SSD",
"vcpu": "36"
},
"id": "c5d.9xlarge",
"name": "c5d.9xlarge",
"ram": 73728
},
"c5d.large": {
"bandwidth": 10,
"disk": 50,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.large",
"instancesku": "DZ66HJ5ZUN3XZX8X",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 50 NVMe SSD",
"vcpu": "2"
},
"id": "c5d.large",
"name": "c5d.large",
"ram": 4096
},
"c5d.xlarge": {
"bandwidth": 10,
"disk": 100,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2250 Mbps",
"ecu": "20",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5d.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 100 NVMe SSD",
"vcpu": "4"
},
"id": "c5d.xlarge",
"name": "c5d.xlarge",
"ram": 8192
},
"c5n.18xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "281",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.18xlarge",
"instancesku": "GP9J882PRJR3HFWT",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "192 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "144",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "72"
},
"id": "c5n.18xlarge",
"name": "c5n.18xlarge",
"ram": 196608
},
"c5n.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "39",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "21 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "c5n.2xlarge",
"name": "c5n.2xlarge",
"ram": 21504
},
"c5n.4xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "73",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.4xlarge",
"instancesku": "RXTSEWTUD8EWJENN",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "42 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "c5n.4xlarge",
"name": "c5n.4xlarge",
"ram": 43008
},
"c5n.9xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "139",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.9xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "96 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "72",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "36"
},
"id": "c5n.9xlarge",
"name": "c5n.9xlarge",
"ram": 98304
},
"c5n.large": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.large",
"instancesku": "RGPG564XSTMQEEGT",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "5.25 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "c5n.large",
"name": "c5n.large",
"ram": 5376
},
"c5n.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "3.0 Ghz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "20",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Compute optimized",
"instanceType": "c5n.xlarge",
"instancesku": "XPUQY5QHXK4NZNKG",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "10.5 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8124M",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "c5n.xlarge",
"name": "c5n.xlarge",
"ram": 10752
},
"cc2.8xlarge": {
"bandwidth": 10,
"disk": 3360,
"extra": {
"clockSpeed": "2.6 GHz",
"currentGeneration": "No",
"ecu": "88",
"instanceFamily": "Compute optimized",
"instanceType": "cc2.8xlarge",
"memory": "60.5 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 840",
"vcpu": "32"
},
"id": "cc2.8xlarge",
"name": "cc2.8xlarge",
"ram": 61952
},
"cr1.8xlarge": {
"bandwidth": 10,
"disk": 240,
"extra": {
"currentGeneration": "No",
"ecu": "88",
"instanceFamily": "Memory optimized",
"instanceType": "cr1.8xlarge",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 120 SSD",
"vcpu": "32"
},
"id": "cr1.8xlarge",
"name": "cr1.8xlarge",
"ram": 249856
},
"d2.2xlarge": {
"bandwidth": None,
"disk": 12000,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "28",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d2.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "61 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "6 x 2000 HDD",
"vcpu": "8"
},
"id": "d2.2xlarge",
"name": "d2.2xlarge",
"ram": 62464
},
"d2.4xlarge": {
"bandwidth": None,
"disk": 24000,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2000 Mbps",
"ecu": "56",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d2.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "122 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "12 x 2000 HDD",
"vcpu": "16"
},
"id": "d2.4xlarge",
"name": "d2.4xlarge",
"ram": 124928
},
"d2.8xlarge": {
"bandwidth": 10,
"disk": 48000,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4000 Mbps",
"ecu": "116",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d2.8xlarge",
"instancesku": "XP5P8NMSB2W7KP3U",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "24 x 2000 HDD",
"vcpu": "36"
},
"id": "d2.8xlarge",
"name": "d2.8xlarge",
"ram": 249856
},
"d2.xlarge": {
"bandwidth": None,
"disk": 6000,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "14",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "d2.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "30.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "3 x 2000 HDD",
"vcpu": "4"
},
"id": "d2.xlarge",
"name": "d2.xlarge",
"ram": 31232
},
"f1.16xlarge": {
"bandwidth": 20,
"disk": 3760,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "FPGA Instances",
"instanceType": "f1.16xlarge",
"instancesku": "P4PZ87XYY47FTQT9",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "976 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 940 NVMe SSD",
"vcpu": "64"
},
"id": "f1.16xlarge",
"name": "f1.16xlarge",
"ram": 999424
},
"f1.2xlarge": {
"bandwidth": 10,
"disk": 470,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1700 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "FPGA Instances",
"instanceType": "f1.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 470 NVMe SSD",
"vcpu": "8"
},
"id": "f1.2xlarge",
"name": "f1.2xlarge",
"ram": 124928
},
"f1.4xlarge": {
"bandwidth": 10,
"disk": 940,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "58",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "FPGA Instances",
"instanceType": "f1.4xlarge",
"instancesku": "3294757KFZTHDXMC",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 940 NVMe SSD",
"vcpu": "16"
},
"id": "f1.4xlarge",
"name": "f1.4xlarge",
"ram": 249856
},
"g2.2xlarge": {
"bandwidth": None,
"disk": 60,
"extra": {
"clockSpeed": "2.6 GHz",
"currentGeneration": "No",
"ecu": "26",
"enhancedNetworkingSupported": "No",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g2.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "15 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2670 (Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 60 SSD",
"vcpu": "8"
},
"id": "g2.2xlarge",
"name": "g2.2xlarge",
"ram": 15360
},
"g2.8xlarge": {
"bandwidth": None,
"disk": 240,
"extra": {
"clockSpeed": "2.6 GHz",
"currentGeneration": "No",
"ecu": "104",
"enhancedNetworkingSupported": "No",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "g2.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "60 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670 (Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 120 SSD",
"vcpu": "32"
},
"id": "g2.8xlarge",
"name": "g2.8xlarge",
"ram": 61440
},
"g3.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "g3.16xlarge",
"instancesku": "9JX6W8YKTGAXCFNB",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "488 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "g3.16xlarge",
"name": "g3.16xlarge",
"ram": 499712
},
"g3.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "58",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g3.4xlarge",
"instancesku": "XQVRFGT3HCZ63BJ5",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "g3.4xlarge",
"name": "g3.4xlarge",
"ram": 124928
},
"g3.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"gpu": "2",
"instanceFamily": "GPU instance",
"instanceType": "g3.8xlarge",
"instancesku": "8DQCJ3V7VZPTQK24",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "g3.8xlarge",
"name": "g3.8xlarge",
"ram": 249856
},
"g3s.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "100 Mbps",
"ecu": "13",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g3s.xlarge",
"instancesku": "CH4D8VQYKHAT566Q",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "30.5 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "g3s.xlarge",
"name": "g3s.xlarge",
"ram": 31232
},
"g4dn.12xlarge": {
"bandwidth": 50,
"disk": 900,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.12xlarge",
"instancesku": "KYG4GJZXW67JH275",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "192 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "900 GB NVMe SSD",
"vcpu": "48"
},
"id": "g4dn.12xlarge",
"name": "g4dn.12xlarge",
"ram": 196608
},
"g4dn.16xlarge": {
"bandwidth": 50,
"disk": 900,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.16xlarge",
"instancesku": "EV7U94XE5G2K8RWP",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "256 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "900 GB NVMe SSD",
"vcpu": "64"
},
"id": "g4dn.16xlarge",
"name": "g4dn.16xlarge",
"ram": 262144
},
"g4dn.2xlarge": {
"bandwidth": 25,
"disk": 225,
"extra": {
"currentGeneration": "Yes",
"ecu": "NA",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.2xlarge",
"instancesku": "3W5TQY9NMW3Z6HKV",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "225 GB NVMe SSD",
"vcpu": "8"
},
"id": "g4dn.2xlarge",
"name": "g4dn.2xlarge",
"ram": 32768
},
"g4dn.4xlarge": {
"bandwidth": 25,
"disk": 225,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.4xlarge",
"instancesku": "P2PYJRR8WRUQQ83N",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "225 GB NVMe SSD",
"vcpu": "16"
},
"id": "g4dn.4xlarge",
"name": "g4dn.4xlarge",
"ram": 65536
},
"g4dn.8xlarge": {
"bandwidth": 50,
"disk": 900,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.8xlarge",
"instancesku": "E4ENVUU4X4C9CA2A",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "128 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "900 GB NVMe SSD",
"vcpu": "32"
},
"id": "g4dn.8xlarge",
"name": "g4dn.8xlarge",
"ram": 131072
},
"g4dn.xlarge": {
"bandwidth": 25,
"disk": 125,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "g4dn.xlarge",
"instancesku": "CBN4GYUGRUCDMWNY",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "125 GB NVMe SSD",
"vcpu": "4"
},
"id": "g4dn.xlarge",
"name": "g4dn.xlarge",
"ram": 16384
},
"h1.16xlarge": {
"bandwidth": 25,
"disk": 16000,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "h1.16xlarge",
"instancesku": "95Z3VYWMT3AARF75",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 2000 HDD",
"vcpu": "64"
},
"id": "h1.16xlarge",
"name": "h1.16xlarge",
"ram": 262144
},
"h1.2xlarge": {
"bandwidth": 10,
"disk": 2000,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1750 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "h1.2xlarge",
"instancesku": "EDHN83CZXXV35NWH",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 2000 HDD",
"vcpu": "8"
},
"id": "h1.2xlarge",
"name": "h1.2xlarge",
"ram": 32768
},
"h1.4xlarge": {
"bandwidth": 10,
"disk": 4000,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "58",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "h1.4xlarge",
"instancesku": "E3W8QW3X5FF55BPW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 2000 HDD",
"vcpu": "16"
},
"id": "h1.4xlarge",
"name": "h1.4xlarge",
"ram": 65536
},
"h1.8xlarge": {
"bandwidth": 10,
"disk": 8000,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "h1.8xlarge",
"instancesku": "V92EE38S25QMHCHR",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 2000 HDD",
"vcpu": "32"
},
"id": "h1.8xlarge",
"name": "h1.8xlarge",
"ram": 131072
},
"hs1.8xlarge": {
"bandwidth": 10,
"disk": 48000,
"extra": {
"clockSpeed": "2 GHz",
"currentGeneration": "No",
"ecu": "35",
"instanceFamily": "Storage optimized",
"instanceType": "hs1.8xlarge",
"memory": "117 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2650",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "24 x 2000",
"vcpu": "17"
},
"id": "hs1.8xlarge",
"name": "hs1.8xlarge",
"ram": 119808
},
"i2.2xlarge": {
"bandwidth": None,
"disk": 1600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "1500 Mbps",
"ecu": "27",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i2.2xlarge",
"instancesku": "S3CUTS4VY2N7TUAC",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "61 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 800 SSD",
"vcpu": "8"
},
"id": "i2.2xlarge",
"name": "i2.2xlarge",
"ram": 62464
},
"i2.4xlarge": {
"bandwidth": None,
"disk": 3200,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "1500 Mbps",
"ecu": "53",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i2.4xlarge",
"instancesku": "QAS6U8F8Z43CEYDJ",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "122 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 800 SSD",
"vcpu": "16"
},
"id": "i2.4xlarge",
"name": "i2.4xlarge",
"ram": 124928
},
"i2.8xlarge": {
"bandwidth": 10,
"disk": 6400,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "1500 Mbps",
"ecu": "104",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i2.8xlarge",
"instancesku": "GJUYG5H8K4MVD9B2",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 800 SSD",
"vcpu": "32"
},
"id": "i2.8xlarge",
"name": "i2.8xlarge",
"ram": 249856
},
"i2.xlarge": {
"bandwidth": None,
"disk": 800,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "14",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i2.xlarge",
"instancesku": "VJV23STC4X8PNAQM",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "30.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 800 SSD",
"vcpu": "4"
},
"id": "i2.xlarge",
"name": "i2.xlarge",
"ram": 31232
},
"i3.16xlarge": {
"bandwidth": 20,
"disk": 15200,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "488 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 1900 NVMe SSD",
"vcpu": "64"
},
"id": "i3.16xlarge",
"name": "i3.16xlarge",
"ram": 499712
},
"i3.2xlarge": {
"bandwidth": 10,
"disk": 1900,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1750 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "61 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1900 NVMe SSD",
"vcpu": "8"
},
"id": "i3.2xlarge",
"name": "i3.2xlarge",
"ram": 62464
},
"i3.4xlarge": {
"bandwidth": 10,
"disk": 3800,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "53",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.4xlarge",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX, Intel AVX2, Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1900 NVMe SSD",
"vcpu": "16"
},
"id": "i3.4xlarge",
"name": "i3.4xlarge",
"ram": 124928
},
"i3.8xlarge": {
"bandwidth": 10,
"disk": 7600,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 1900 NVMe SSD",
"vcpu": "32"
},
"id": "i3.8xlarge",
"name": "i3.8xlarge",
"ram": 249856
},
"i3.large": {
"bandwidth": 10,
"disk": 475,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "425 Mbps",
"ecu": "8",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "15.25 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 475 NVMe SSD",
"vcpu": "2"
},
"id": "i3.large",
"name": "i3.large",
"ram": 15616
},
"i3.xlarge": {
"bandwidth": 10,
"disk": 950,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "850 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3.xlarge",
"instancesku": "99QZ9VR3AF2FW3TB",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "30.5 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 950 NVMe SSD",
"vcpu": "4"
},
"id": "i3.xlarge",
"name": "i3.xlarge",
"ram": 31232
},
"i3en.12xlarge": {
"bandwidth": 50,
"disk": 30000,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.12xlarge",
"instancesku": "GVPZRNHNZPE4JV4G",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "384 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 7500 NVMe SSD",
"vcpu": "48"
},
"id": "i3en.12xlarge",
"name": "i3en.12xlarge",
"ram": 393216
},
"i3en.24xlarge": {
"bandwidth": 100,
"disk": 60000,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.24xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "768 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "8 x 7500 NVMe SSD",
"vcpu": "96"
},
"id": "i3en.24xlarge",
"name": "i3en.24xlarge",
"ram": 786432
},
"i3en.2xlarge": {
"bandwidth": 25,
"disk": 5000,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.2xlarge",
"instancesku": "KPSBHZJSEURW9AUU",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 2500 NVMe SSD",
"vcpu": "8"
},
"id": "i3en.2xlarge",
"name": "i3en.2xlarge",
"ram": 65536
},
"i3en.3xlarge": {
"bandwidth": 25,
"disk": 7500,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.3xlarge",
"instancesku": "4M9CWY3DF8ZQRQ2F",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "96 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "24",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 7500 NVMe SSD",
"vcpu": "12"
},
"id": "i3en.3xlarge",
"name": "i3en.3xlarge",
"ram": 98304
},
"i3en.6xlarge": {
"bandwidth": 25,
"disk": 15000,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.6xlarge",
"instancesku": "8RHN5TZKA9MW3AWW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "192 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "48",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 7500 NVMe SSD",
"vcpu": "24"
},
"id": "i3en.6xlarge",
"name": "i3en.6xlarge",
"ram": 196608
},
"i3en.large": {
"bandwidth": 25,
"disk": 1250,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.large",
"instancesku": "EDC6Z6KEEJ6KENE8",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1250 NVMe SSD",
"vcpu": "2"
},
"id": "i3en.large",
"name": "i3en.large",
"ram": 16384
},
"i3en.xlarge": {
"bandwidth": 25,
"disk": 2500,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Storage optimized",
"instanceType": "i3en.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 2500 NVMe SSD",
"vcpu": "4"
},
"id": "i3en.xlarge",
"name": "i3en.xlarge",
"ram": 32768
},
"inf1.24xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Machine Learning ASIC Instances",
"instanceType": "inf1.24xlarge",
"instancesku": "YKZ73T2PBEWV8HSM",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "192 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96"
},
"id": "inf1.24xlarge",
"name": "inf1.24xlarge",
"ram": 196608
},
"inf1.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "875 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Machine Learning ASIC Instances",
"instanceType": "inf1.2xlarge",
"instancesku": "66AAM3URQPDCTMXY",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "inf1.2xlarge",
"name": "inf1.2xlarge",
"ram": 16384
},
"inf1.6xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Machine Learning ASIC Instances",
"instanceType": "inf1.6xlarge",
"instancesku": "CGXS8PYBHFGKES9Z",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "48 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "24"
},
"id": "inf1.6xlarge",
"name": "inf1.6xlarge",
"ram": 49152
},
"inf1.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "875 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Machine Learning ASIC Instances",
"instanceType": "inf1.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "8 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "NA",
"physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "inf1.xlarge",
"name": "inf1.xlarge",
"ram": 8192
},
"m1.large": {
"bandwidth": None,
"disk": 840,
"extra": {
"currentGeneration": "No",
"ecu": "4",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m1.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "7.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 420 SSD",
"vcpu": "2"
},
"id": "m1.large",
"name": "m1.large",
"ram": 7680
},
"m1.medium": {
"bandwidth": None,
"disk": 410,
"extra": {
"currentGeneration": "No",
"ecu": "2",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m1.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "3.75 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 410 SSD",
"vcpu": "1"
},
"id": "m1.medium",
"name": "m1.medium",
"ram": 3840
},
"m1.small": {
"bandwidth": None,
"disk": 160,
"extra": {
"currentGeneration": "No",
"ecu": "1",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m1.small",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "1.7 GiB",
"networkPerformance": "Low",
"normalizationSizeFactor": "1",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 160 SSD",
"vcpu": "1"
},
"id": "m1.small",
"name": "m1.small",
"ram": 1740
},
"m1.xlarge": {
"bandwidth": None,
"disk": 1680,
"extra": {
"currentGeneration": "No",
"ecu": "8",
"instanceFamily": "General purpose",
"instanceType": "m1.xlarge",
"memory": "15 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 420",
"vcpu": "4"
},
"id": "m1.xlarge",
"name": "m1.xlarge",
"ram": 15360
},
"m2.2xlarge": {
"bandwidth": None,
"disk": 850,
"extra": {
"currentGeneration": "No",
"ecu": "13",
"instanceFamily": "Memory optimized",
"instanceType": "m2.2xlarge",
"memory": "34.2 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 850",
"vcpu": "4"
},
"id": "m2.2xlarge",
"name": "m2.2xlarge",
"ram": 35020
},
"m2.4xlarge": {
"bandwidth": None,
"disk": 1680,
"extra": {
"currentGeneration": "No",
"ecu": "26",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "m2.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "68.4 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 840 SSD",
"vcpu": "8"
},
"id": "m2.4xlarge",
"name": "m2.4xlarge",
"ram": 70041
},
"m2.xlarge": {
"bandwidth": None,
"disk": 420,
"extra": {
"currentGeneration": "No",
"ecu": "6.5",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "m2.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "17.1 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 420 SSD",
"vcpu": "2"
},
"id": "m2.xlarge",
"name": "m2.xlarge",
"ram": 17510
},
"m3.2xlarge": {
"bandwidth": None,
"disk": 160,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "26",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m3.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "30 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 80 SSD",
"vcpu": "8"
},
"id": "m3.2xlarge",
"name": "m3.2xlarge",
"ram": 30720
},
"m3.large": {
"bandwidth": None,
"disk": 32,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "6.5",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m3.large",
"instancesku": "8KTQAHWA58GUHDGC",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "7.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 32 SSD",
"vcpu": "2"
},
"id": "m3.large",
"name": "m3.large",
"ram": 7680
},
"m3.medium": {
"bandwidth": None,
"disk": 4,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "3",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m3.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "3.75 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 4 SSD",
"vcpu": "1"
},
"id": "m3.medium",
"name": "m3.medium",
"ram": 3840
},
"m3.xlarge": {
"bandwidth": None,
"disk": 80,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"dedicatedEbsThroughput": "500 Mbps",
"ecu": "13",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m3.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "15 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 40 SSD",
"vcpu": "4"
},
"id": "m3.xlarge",
"name": "m3.xlarge",
"ram": 15360
},
"m4.10xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4000 Mbps",
"ecu": "124.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.10xlarge",
"instancesku": "MDVS9QJ4U4VXR7P6",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "160 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "80",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "40"
},
"id": "m4.10xlarge",
"name": "m4.10xlarge",
"ram": 163840
},
"m4.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "188",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "256 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "m4.16xlarge",
"name": "m4.16xlarge",
"ram": 262144
},
"m4.2xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "26",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.2xlarge",
"memory": "32 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "m4.2xlarge",
"name": "m4.2xlarge",
"ram": 32768
},
"m4.4xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2000 Mbps",
"ecu": "53.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "64 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "m4.4xlarge",
"name": "m4.4xlarge",
"ram": 65536
},
"m4.large": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "450 Mbps",
"ecu": "6.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "8 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "m4.large",
"name": "m4.large",
"ram": 8192
},
"m4.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.4 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "13",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m4.xlarge",
"instancesku": "5RFD7HVPT29ZZRJZ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "16 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2676 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "m4.xlarge",
"name": "m4.xlarge",
"ram": 16384
},
"m5.12xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.12xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48"
},
"id": "m5.12xlarge",
"name": "m5.12xlarge",
"ram": 196608
},
"m5.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "256",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "256 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "m5.16xlarge",
"name": "m5.16xlarge",
"ram": 262144
},
"m5.24xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "384 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96"
},
"id": "m5.24xlarge",
"name": "m5.24xlarge",
"ram": 393216
},
"m5.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "m5.2xlarge",
"name": "m5.2xlarge",
"ram": 32768
},
"m5.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "70",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.4xlarge",
"instancesku": "GPN9XWJXU6R7VNVU",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "m5.4xlarge",
"name": "m5.4xlarge",
"ram": 65536
},
"m5.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "128",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "m5.8xlarge",
"name": "m5.8xlarge",
"ram": 131072
},
"m5.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Upto 2120 Mbps",
"ecu": "8",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.large",
"instancesku": "2WTZUAPYXZ92MPYV",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX, Intel AVX2, Intel AVX512, Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "m5.large",
"name": "m5.large",
"ram": 8192
},
"m5.metal": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "345",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.metal",
"instancesku": "NEA5ZGBNTVZY47F8",
"memory": "384 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX, Intel AVX2, Intel AVX512, Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96"
},
"id": "m5.metal",
"name": "m5.metal",
"ram": 393216
},
"m5.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "m5.xlarge",
"name": "m5.xlarge",
"ram": 16384
},
"m5a.12xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.12xlarge",
"instancesku": "NTCQHKBGDWZV9A4S",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48"
},
"id": "m5a.12xlarge",
"name": "m5a.12xlarge",
"ram": 196608
},
"m5a.16xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.16xlarge",
"instancesku": "PDTXJ9FD3DSMTAWR",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "256 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "m5a.16xlarge",
"name": "m5a.16xlarge",
"ram": 262144
},
"m5a.24xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.24xlarge",
"instancesku": "3DZQSBC8ACN83U66",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "384 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96"
},
"id": "m5a.24xlarge",
"name": "m5a.24xlarge",
"ram": 393216
},
"m5a.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.2xlarge",
"instancesku": "SQ6QFVBCH7QJN7WE",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "m5a.2xlarge",
"name": "m5a.2xlarge",
"ram": 32768
},
"m5a.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.4xlarge",
"instancesku": "YHV5N59X2JZJT7UF",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "m5a.4xlarge",
"name": "m5a.4xlarge",
"ram": 65536
},
"m5a.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "m5a.8xlarge",
"name": "m5a.8xlarge",
"ram": 131072
},
"m5a.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.large",
"instancesku": "YETHAPD3GP4N3ZNK",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "m5a.large",
"name": "m5a.large",
"ram": 8192
},
"m5a.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5a.xlarge",
"instancesku": "2DC7PA6547H9EQEU",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "m5a.xlarge",
"name": "m5a.xlarge",
"ram": 16384
},
"m5ad.12xlarge": {
"bandwidth": 10,
"disk": 1800,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48"
},
"id": "m5ad.12xlarge",
"name": "m5ad.12xlarge",
"ram": 196608
},
"m5ad.16xlarge": {
"bandwidth": 12,
"disk": 2400,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "256 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64"
},
"id": "m5ad.16xlarge",
"name": "m5ad.16xlarge",
"ram": 262144
},
"m5ad.24xlarge": {
"bandwidth": 20,
"disk": 3600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.24xlarge",
"instancesku": "6QSKH897CEX9D5PD",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "384 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96"
},
"id": "m5ad.24xlarge",
"name": "m5ad.24xlarge",
"ram": 393216
},
"m5ad.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8"
},
"id": "m5ad.2xlarge",
"name": "m5ad.2xlarge",
"ram": 32768
},
"m5ad.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.4xlarge",
"instancesku": "53D2DUQDA28GP4FA",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16"
},
"id": "m5ad.4xlarge",
"name": "m5ad.4xlarge",
"ram": 65536
},
"m5ad.8xlarge": {
"bandwidth": 10,
"disk": 1200,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32"
},
"id": "m5ad.8xlarge",
"name": "m5ad.8xlarge",
"ram": 131072
},
"m5ad.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.large",
"instancesku": "DV2FPCRNYQC737FY",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2"
},
"id": "m5ad.large",
"name": "m5ad.large",
"ram": 8192
},
"m5ad.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5ad.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4"
},
"id": "m5ad.xlarge",
"name": "m5ad.xlarge",
"ram": 16384
},
"m5d.12xlarge": {
"bandwidth": 10,
"disk": 1800,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.12xlarge",
"instancesku": "GMGGY8H6FE639R7J",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48"
},
"id": "m5d.12xlarge",
"name": "m5d.12xlarge",
"ram": 196608
},
"m5d.16xlarge": {
"bandwidth": 20,
"disk": 2400,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "256",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "256 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64"
},
"id": "m5d.16xlarge",
"name": "m5d.16xlarge",
"ram": 262144
},
"m5d.24xlarge": {
"bandwidth": 25,
"disk": 3600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "384 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96"
},
"id": "m5d.24xlarge",
"name": "m5d.24xlarge",
"ram": 393216
},
"m5d.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8"
},
"id": "m5d.2xlarge",
"name": "m5d.2xlarge",
"ram": 32768
},
"m5d.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "70",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.4xlarge",
"instancesku": "SJTJT7YB2TAKEFUZ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16"
},
"id": "m5d.4xlarge",
"name": "m5d.4xlarge",
"ram": 65536
},
"m5d.8xlarge": {
"bandwidth": 10,
"disk": 1200,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "128",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.8xlarge",
"instancesku": "7WTXDDTKVDX74SRU",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32"
},
"id": "m5d.8xlarge",
"name": "m5d.8xlarge",
"ram": 131072
},
"m5d.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2"
},
"id": "m5d.large",
"name": "m5d.large",
"ram": 8192
},
"m5d.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Upto 2120 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "m5d.xlarge",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX, Intel AVX2, Intel AVX512, Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4"
},
"id": "m5d.xlarge",
"name": "m5d.xlarge",
"ram": 16384
},
"m5dn.12xlarge": {
"bandwidth": 50,
"disk": 1800,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "192 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 GB NVMe SSD",
"vcpu": "48"
},
"id": "m5dn.12xlarge",
"name": "m5dn.12xlarge",
"ram": 196608
},
"m5dn.16xlarge": {
"bandwidth": 75,
"disk": 2400,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.16xlarge",
"instancesku": "2UXZVUW3AW4RWSVK",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "256 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64"
},
"id": "m5dn.16xlarge",
"name": "m5dn.16xlarge",
"ram": 262144
},
"m5dn.24xlarge": {
"bandwidth": 100,
"disk": 3600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.24xlarge",
"instancesku": "3CSFMMFP6H6R4C8N",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "384 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96"
},
"id": "m5dn.24xlarge",
"name": "m5dn.24xlarge",
"ram": 393216
},
"m5dn.2xlarge": {
"bandwidth": 25,
"disk": 300,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.2xlarge",
"instancesku": "EFERZJ2267U6HKKN",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8"
},
"id": "m5dn.2xlarge",
"name": "m5dn.2xlarge",
"ram": 32768
},
"m5dn.4xlarge": {
"bandwidth": 25,
"disk": 600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16"
},
"id": "m5dn.4xlarge",
"name": "m5dn.4xlarge",
"ram": 65536
},
"m5dn.8xlarge": {
"bandwidth": 25,
"disk": 1200,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "128 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32"
},
"id": "m5dn.8xlarge",
"name": "m5dn.8xlarge",
"ram": 131072
},
"m5dn.large": {
"bandwidth": 25,
"disk": 75,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.large",
"instancesku": "TP3PAHR3346GJ67W",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "8 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2"
},
"id": "m5dn.large",
"name": "m5dn.large",
"ram": 8192
},
"m5dn.xlarge": {
"bandwidth": 25,
"disk": 150,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5dn.xlarge",
"instancesku": "SHVB6RBA3NZZ9V4Z",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4"
},
"id": "m5dn.xlarge",
"name": "m5dn.xlarge",
"ram": 16384
},
"m5n.12xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "192 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48"
},
"id": "m5n.12xlarge",
"name": "m5n.12xlarge",
"ram": 196608
},
"m5n.16xlarge": {
"bandwidth": 75,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.16xlarge",
"instancesku": "EA8S423TQADF5CY2",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "256 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "m5n.16xlarge",
"name": "m5n.16xlarge",
"ram": 262144
},
"m5n.24xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.24xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "384 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96"
},
"id": "m5n.24xlarge",
"name": "m5n.24xlarge",
"ram": 393216
},
"m5n.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.2xlarge",
"instancesku": "UYEPHYDEF5R5EBP5",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "m5n.2xlarge",
"name": "m5n.2xlarge",
"ram": 32768
},
"m5n.4xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.4xlarge",
"instancesku": "ZH4K42CV65578MRJ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "m5n.4xlarge",
"name": "m5n.4xlarge",
"ram": 65536
},
"m5n.8xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.8xlarge",
"instancesku": "ATZJ9KFTKYZZMGEJ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "128 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "m5n.8xlarge",
"name": "m5n.8xlarge",
"ram": 131072
},
"m5n.large": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.large",
"instancesku": "2FNJS8YA6UBP65EW",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "8 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "m5n.large",
"name": "m5n.large",
"ram": 8192
},
"m5n.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m5n.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "m5n.xlarge",
"name": "m5n.xlarge",
"ram": 16384
},
"m6g.12xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"ecu": "NA",
"instanceFamily": "General purpose",
"instanceType": "m6g.12xlarge",
"instancesku": "WW7AYBVNX4P6DKEG",
"memory": "192 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48"
},
"id": "m6g.12xlarge",
"name": "m6g.12xlarge",
"ram": 196608
},
"m6g.16xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "18000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m6g.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "m6g.16xlarge",
"name": "m6g.16xlarge",
"ram": 262144
},
"m6g.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2250 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m6g.2xlarge",
"instancesku": "275QHDMHEFGAB9QZ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "m6g.2xlarge",
"name": "m6g.2xlarge",
"ram": 32768
},
"m6g.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "4500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m6g.4xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "m6g.4xlarge",
"name": "m6g.4xlarge",
"ram": 65536
},
"m6g.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "9000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m6g.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "m6g.8xlarge",
"name": "m6g.8xlarge",
"ram": 131072
},
"m6g.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "600 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m6g.large",
"instancesku": "WKZUYUTMHZP7PGQS",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "8 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "m6g.large",
"name": "m6g.large",
"ram": 8192
},
"m6g.medium": {
"bandwidth": 10,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "300 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m6g.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "4 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "2",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1"
},
"id": "m6g.medium",
"name": "m6g.medium",
"ram": 4096
},
"m6g.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1125 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "m6g.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AWS Graviton2 Processor",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "m6g.xlarge",
"name": "m6g.xlarge",
"ram": 16384
},
"p2.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"gpu": "16",
"instanceFamily": "GPU instance",
"instanceType": "p2.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "732 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "p2.16xlarge",
"name": "p2.16xlarge",
"ram": 749568
},
"p2.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"gpu": "8",
"instanceFamily": "GPU instance",
"instanceType": "p2.8xlarge",
"instancesku": "CJQFCM483GYMQ4H7",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "488 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "p2.8xlarge",
"name": "p2.8xlarge",
"ram": 499712
},
"p2.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "750 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "p2.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "61 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "p2.xlarge",
"name": "p2.xlarge",
"ram": 62464
},
"p3.16xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "201",
"enhancedNetworkingSupported": "Yes",
"gpu": "8",
"instanceFamily": "GPU instance",
"instanceType": "p3.16xlarge",
"instancesku": "23X4DDDYPQ4U9WXE",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "488 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "p3.16xlarge",
"name": "p3.16xlarge",
"ram": 499712
},
"p3.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1750 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"gpu": "1",
"instanceFamily": "GPU instance",
"instanceType": "p3.2xlarge",
"instancesku": "BQCBF2WYFFDEDXN7",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "61 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "p3.2xlarge",
"name": "p3.2xlarge",
"ram": 62464
},
"p3.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"gpu": "4",
"instanceFamily": "GPU instance",
"instanceType": "p3.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "p3.8xlarge",
"name": "p3.8xlarge",
"ram": 249856
},
"p3dn.24xlarge": {
"bandwidth": 100,
"disk": 1800,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"gpu": "8",
"instanceFamily": "GPU instance",
"instanceType": "p3dn.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "768 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "96"
},
"id": "p3dn.24xlarge",
"name": "p3dn.24xlarge",
"ram": 786432
},
"r3.2xlarge": {
"bandwidth": None,
"disk": 160,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "26",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.2xlarge",
"instancesku": "QPBU35XRU5BNBBFE",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "61 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 160 SSD",
"vcpu": "8"
},
"id": "r3.2xlarge",
"name": "r3.2xlarge",
"ram": 62464
},
"r3.4xlarge": {
"bandwidth": None,
"disk": 320,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "52",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.4xlarge",
"instancesku": "Y9WDY7HG6S2NXFSP",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "122 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 320 SSD",
"vcpu": "16"
},
"id": "r3.4xlarge",
"name": "r3.4xlarge",
"ram": 124928
},
"r3.8xlarge": {
"bandwidth": 10,
"disk": 640,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "104",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 320 SSD",
"vcpu": "32"
},
"id": "r3.8xlarge",
"name": "r3.8xlarge",
"ram": 249856
},
"r3.large": {
"bandwidth": None,
"disk": 32,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "6.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.large",
"memory": "15.25 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 32 SSD",
"vcpu": "2"
},
"id": "r3.large",
"name": "r3.large",
"ram": 15616
},
"r3.xlarge": {
"bandwidth": None,
"disk": 80,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "No",
"ecu": "13",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r3.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "30.5 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 80 SSD",
"vcpu": "4"
},
"id": "r3.xlarge",
"name": "r3.xlarge",
"ram": 31232
},
"r4.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "195",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.16xlarge",
"instancesku": "M3PF8AJDMYE8CTHJ",
"memory": "488 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX, Intel AVX2, Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "r4.16xlarge",
"name": "r4.16xlarge",
"ram": 499712
},
"r4.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1600 Mbps",
"ecu": "31",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.2xlarge",
"instancesku": "T73VEGF6Z7U2FM2E",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "61 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "r4.2xlarge",
"name": "r4.2xlarge",
"ram": 62464
},
"r4.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3000 Mbps",
"ecu": "58",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.4xlarge",
"instancesku": "Q4JYWMFXQ9MM3WYP",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "r4.4xlarge",
"name": "r4.4xlarge",
"ram": 124928
},
"r4.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "97",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "244 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "r4.8xlarge",
"name": "r4.8xlarge",
"ram": 249856
},
"r4.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "400 Mbps",
"ecu": "7",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.large",
"instancesku": "EMEXNPTCFPMK9SZ6",
"memory": "15.25 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX, Intel AVX2, Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "r4.large",
"name": "r4.large",
"ram": 15616
},
"r4.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "800 Mbps",
"ecu": "16",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r4.xlarge",
"instancesku": "Q985NTC5RUN6W6FN",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "30.5 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "r4.xlarge",
"name": "r4.xlarge",
"ram": 31232
},
"r5.12xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.12xlarge",
"instancesku": "DEBR9WT8ZEH5SVYY",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "384 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48"
},
"id": "r5.12xlarge",
"name": "r5.12xlarge",
"ram": 393216
},
"r5.16xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "256",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.16xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "512 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "r5.16xlarge",
"name": "r5.16xlarge",
"ram": 524288
},
"r5.24xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "768 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96"
},
"id": "r5.24xlarge",
"name": "r5.24xlarge",
"ram": 786432
},
"r5.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.2xlarge",
"instancesku": "TX6VCU4AX7SURS3R",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "r5.2xlarge",
"name": "r5.2xlarge",
"ram": 65536
},
"r5.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "70",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "128 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "r5.4xlarge",
"name": "r5.4xlarge",
"ram": 131072
},
"r5.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "128",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.8xlarge",
"instancesku": "8J9E8648XRY8KRTV",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "256 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "r5.8xlarge",
"name": "r5.8xlarge",
"ram": 262144
},
"r5.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.large",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "r5.large",
"name": "r5.large",
"ram": 16384
},
"r5.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "3.1 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "19",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5.xlarge",
"instancesku": "RYY8KJSCAHAJ2S2N",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "r5.xlarge",
"name": "r5.xlarge",
"ram": 32768
},
"r5a.12xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.12xlarge",
"instancesku": "K4WF4MUAD8NH6F85",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "384 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48"
},
"id": "r5a.12xlarge",
"name": "r5a.12xlarge",
"ram": 393216
},
"r5a.16xlarge": {
"bandwidth": 12,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.16xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "512 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "64"
},
"id": "r5a.16xlarge",
"name": "r5a.16xlarge",
"ram": 524288
},
"r5a.24xlarge": {
"bandwidth": 20,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.24xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "768 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96"
},
"id": "r5a.24xlarge",
"name": "r5a.24xlarge",
"ram": 786432
},
"r5a.2xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.2xlarge",
"instancesku": "P7S4KAQ9D4GAFR54",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "r5a.2xlarge",
"name": "r5a.2xlarge",
"ram": 65536
},
"r5a.4xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.4xlarge",
"instancesku": "SDQK54UK2HRQTUXV",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "16"
},
"id": "r5a.4xlarge",
"name": "r5a.4xlarge",
"ram": 131072
},
"r5a.8xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.8xlarge",
"instancesku": "6B56HCXAHSRM98ZB",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "256 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "r5a.8xlarge",
"name": "r5a.8xlarge",
"ram": 262144
},
"r5a.large": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.large",
"instancesku": "UG5A9S2YU4GDQ6VS",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "r5a.large",
"name": "r5a.large",
"ram": 16384
},
"r5a.xlarge": {
"bandwidth": 10,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5a.xlarge",
"instancesku": "3QM2JUUU3729P3XF",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "r5a.xlarge",
"name": "r5a.xlarge",
"ram": 32768
},
"r5ad.12xlarge": {
"bandwidth": 10,
"disk": 1800,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.12xlarge",
"instancesku": "CVGPBG74JXGXFUPZ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "384 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48"
},
"id": "r5ad.12xlarge",
"name": "r5ad.12xlarge",
"ram": 393216
},
"r5ad.16xlarge": {
"bandwidth": 12,
"disk": 2400,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.16xlarge",
"instancesku": "5ECTGJZ54PJWVXSR",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "512 GiB",
"networkPerformance": "12 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64"
},
"id": "r5ad.16xlarge",
"name": "r5ad.16xlarge",
"ram": 524288
},
"r5ad.24xlarge": {
"bandwidth": 20,
"disk": 3600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.24xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "768 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96"
},
"id": "r5ad.24xlarge",
"name": "r5ad.24xlarge",
"ram": 786432
},
"r5ad.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.2xlarge",
"instancesku": "GUVRRZUCEA4W2NA2",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8"
},
"id": "r5ad.2xlarge",
"name": "r5ad.2xlarge",
"ram": 65536
},
"r5ad.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.4xlarge",
"instancesku": "VT3PC6G59VCVCDVN",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16"
},
"id": "r5ad.4xlarge",
"name": "r5ad.4xlarge",
"ram": 131072
},
"r5ad.8xlarge": {
"bandwidth": 10,
"disk": 1200,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.8xlarge",
"instancesku": "Y7ZUMCMQMBHWWD67",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "256 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32"
},
"id": "r5ad.8xlarge",
"name": "r5ad.8xlarge",
"ram": 262144
},
"r5ad.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.large",
"instancesku": "DZNY4VEMMSVRTR73",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2"
},
"id": "r5ad.large",
"name": "r5ad.large",
"ram": 16384
},
"r5ad.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5ad.xlarge",
"instancesku": "X9QQDFJ8MS8FB32D",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4"
},
"id": "r5ad.xlarge",
"name": "r5ad.xlarge",
"ram": 32768
},
"r5d.12xlarge": {
"bandwidth": 10,
"disk": 1800,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "168",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.12xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "384 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48"
},
"id": "r5d.12xlarge",
"name": "r5d.12xlarge",
"ram": 393216
},
"r5d.16xlarge": {
"bandwidth": 20,
"disk": 2400,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "256",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.16xlarge",
"instancesku": "7A8KJ5V5P6RMFWQ8",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "512 GiB",
"networkPerformance": "20 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64"
},
"id": "r5d.16xlarge",
"name": "r5d.16xlarge",
"ram": 524288
},
"r5d.24xlarge": {
"bandwidth": 25,
"disk": 3600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "337",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.24xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "768 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96"
},
"id": "r5d.24xlarge",
"name": "r5d.24xlarge",
"ram": 786432
},
"r5d.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "37",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "64 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8"
},
"id": "r5d.2xlarge",
"name": "r5d.2xlarge",
"ram": 65536
},
"r5d.4xlarge": {
"bandwidth": 10,
"disk": 600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "70",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "128 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16"
},
"id": "r5d.4xlarge",
"name": "r5d.4xlarge",
"ram": 131072
},
"r5d.8xlarge": {
"bandwidth": 10,
"disk": 1200,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "128",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.8xlarge",
"instancesku": "KDH339PT4BZYF4KZ",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "256 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32"
},
"id": "r5d.8xlarge",
"name": "r5d.8xlarge",
"ram": 262144
},
"r5d.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "10",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.large",
"instancesku": "GGDAM58JTEWKNJHM",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "16 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2"
},
"id": "r5d.large",
"name": "r5d.large",
"ram": 16384
},
"r5d.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "19",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "r5d.xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "32 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8175 (Skylake)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4"
},
"id": "r5d.xlarge",
"name": "r5d.xlarge",
"ram": 32768
},
"r5dn.12xlarge": {
"bandwidth": 50,
"disk": 1800,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "384 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 GB NVMe SSD",
"vcpu": "48"
},
"id": "r5dn.12xlarge",
"name": "r5dn.12xlarge",
"ram": 393216
},
"r5dn.16xlarge": {
"bandwidth": 75,
"disk": 2400,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "10000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.16xlarge",
"instancesku": "7V498XTUEEHCJRGZ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "512 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 600 NVMe SSD",
"vcpu": "64"
},
"id": "r5dn.16xlarge",
"name": "r5dn.16xlarge",
"ram": 524288
},
"r5dn.24xlarge": {
"bandwidth": 100,
"disk": 3600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.24xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "768 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "4 x 900 NVMe SSD",
"vcpu": "96"
},
"id": "r5dn.24xlarge",
"name": "r5dn.24xlarge",
"ram": 786432
},
"r5dn.2xlarge": {
"bandwidth": 25,
"disk": 300,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.2xlarge",
"instancesku": "SV48KRP8HM2DH2UC",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8"
},
"id": "r5dn.2xlarge",
"name": "r5dn.2xlarge",
"ram": 65536
},
"r5dn.4xlarge": {
"bandwidth": 25,
"disk": 600,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.4xlarge",
"instancesku": "KFFG95QS5WUKQP8D",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "128 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 300 NVMe SSD",
"vcpu": "16"
},
"id": "r5dn.4xlarge",
"name": "r5dn.4xlarge",
"ram": 131072
},
"r5dn.8xlarge": {
"bandwidth": 25,
"disk": 1200,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.8xlarge",
"instancesku": "WJB3V98YXM2CZ9US",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 600 NVMe SSD",
"vcpu": "32"
},
"id": "r5dn.8xlarge",
"name": "r5dn.8xlarge",
"ram": 262144
},
"r5dn.large": {
"bandwidth": 25,
"disk": 75,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.large",
"instancesku": "5SM96WH3QZND6QG9",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2"
},
"id": "r5dn.large",
"name": "r5dn.large",
"ram": 16384
},
"r5dn.xlarge": {
"bandwidth": 25,
"disk": 150,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5dn.xlarge",
"instancesku": "KW4K9UDQ493ER24R",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4"
},
"id": "r5dn.xlarge",
"name": "r5dn.xlarge",
"ram": 32768
},
"r5n.12xlarge": {
"bandwidth": 50,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "6000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.12xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "384 GiB",
"networkPerformance": "50 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "48"
},
"id": "r5n.12xlarge",
"name": "r5n.12xlarge",
"ram": 393216
},
"r5n.16xlarge": {
"bandwidth": 75,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"ecu": "NA",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.16xlarge",
"instancesku": "8V7ZBPH5MUDT6TF5",
"memory": "512 GiB",
"networkPerformance": "75 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "NA",
"vcpu": "64"
},
"id": "r5n.16xlarge",
"name": "r5n.16xlarge",
"ram": 524288
},
"r5n.24xlarge": {
"bandwidth": 100,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "12000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.24xlarge",
"instancesku": "84XC4EP5DDTNC8ZA",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "768 GiB",
"networkPerformance": "100 Gigabit",
"normalizationSizeFactor": "192",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "96"
},
"id": "r5n.24xlarge",
"name": "r5n.24xlarge",
"ram": 786432
},
"r5n.2xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.2xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "64 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "r5n.2xlarge",
"name": "r5n.2xlarge",
"ram": 65536
},
"r5n.4xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"currentGeneration": "Yes",
"ecu": "NA",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.4xlarge",
"instancesku": "QDCRSRKZM3MX7XAG",
"memory": "128 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "NA",
"vcpu": "16"
},
"id": "r5n.4xlarge",
"name": "r5n.4xlarge",
"ram": 131072
},
"r5n.8xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "5000 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.8xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "256 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "32"
},
"id": "r5n.8xlarge",
"name": "r5n.8xlarge",
"ram": 262144
},
"r5n.large": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.large",
"instancesku": "KN56TRZD3CM72VXU",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "r5n.large",
"name": "r5n.large",
"ram": 16384
},
"r5n.xlarge": {
"bandwidth": 25,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "NA",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Memory optimized",
"instanceType": "r5n.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Up to 25 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8259 (Cascade Lake)",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "r5n.xlarge",
"name": "r5n.xlarge",
"ram": 32768
},
"t1.micro": {
"bandwidth": None,
"disk": 0,
"extra": {
"currentGeneration": "No",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "Micro instances",
"instanceType": "t1.micro",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "0.613 GiB",
"networkPerformance": "Very Low",
"normalizationSizeFactor": "0.5",
"physicalProcessor": "Variable",
"processorArchitecture": "32-bit or 64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1"
},
"id": "t1.micro",
"name": "t1.micro",
"ram": 627
},
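    # Descriptive note (inferred from the entries in this table, not from an external
    # source): the top-level "ram" field appears to mirror extra["memory"] converted
    # from GiB to MiB and truncated to an int, e.g. "32 GiB" -> 32768, "0.613 GiB"
    # (t1.micro above) -> 627. "disk" likewise appears to sum the instance-store
    # sizes listed in extra["storage"], with 0 for "EBS only".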
"t2.2xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.2xlarge",
"instancesku": "WKK3DC7GCMFXGGYP",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "32 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "t2.2xlarge",
"name": "t2.2xlarge",
"ram": 32768
},
"t2.large": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "8 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t2.large",
"name": "t2.large",
"ram": 8192
},
"t2.medium": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.medium",
"instancesku": "HSW33TU9JRSFYRKF",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "4 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t2.medium",
"name": "t2.medium",
"ram": 4096
},
"t2.micro": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.micro",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "1 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "0.5",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1"
},
"id": "t2.micro",
"name": "t2.micro",
"ram": 1024
},
"t2.nano": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"instanceFamily": "General purpose",
"instanceType": "t2.nano",
"memory": "0.5 GiB",
"networkPerformance": "Low",
"normalizationSizeFactor": "0.25",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1"
},
"id": "t2.nano",
"name": "t2.nano",
"ram": 512
},
"t2.small": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.small",
"instancesku": "XQ8FKG65FR9Z4ZPR",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "2 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "1",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "32-bit or 64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "1"
},
"id": "t2.small",
"name": "t2.small",
"ram": 2048
},
"t2.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "Up to 3.3 GHz",
"currentGeneration": "Yes",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t2.xlarge",
"intelAvx2Available": "No",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "16 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Family",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "t2.xlarge",
"name": "t2.xlarge",
"ram": 16384
},
"t3.2xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2048 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.2xlarge",
"instancesku": "56TSPRQERU8CAJ43",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "t3.2xlarge",
"name": "t3.2xlarge",
"ram": 32768
},
"t3.large": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2048 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.large",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "8 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3.large",
"name": "t3.large",
"ram": 8192
},
"t3.medium": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 1536 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.medium",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "4 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3.medium",
"name": "t3.medium",
"ram": 4096
},
"t3.micro": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 1536 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.micro",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "1 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "0.5",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3.micro",
"name": "t3.micro",
"ram": 1024
},
"t3.nano": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 1536 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.nano",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "0.5 GiB",
"networkPerformance": "Low",
"normalizationSizeFactor": "0.25",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3.nano",
"name": "t3.nano",
"ram": 512
},
"t3.small": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 1536 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.small",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "2 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "1",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3.small",
"name": "t3.small",
"ram": 2048
},
"t3.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2048 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "No",
"instanceFamily": "General purpose",
"instanceType": "t3.xlarge",
"instancesku": "6YPUHM9X2FGUB8D2",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "16 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Skylake E5 2686 v5",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "t3.xlarge",
"name": "t3.xlarge",
"ram": 16384
},
"t3a.2xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.2xlarge",
"instancesku": "FSP74JWHB79FDG9T",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "32 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "16",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "8"
},
"id": "t3a.2xlarge",
"name": "t3a.2xlarge",
"ram": 32768
},
"t3a.large": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Upto 2120 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.large",
"instancesku": "QE64UKF9H7MH4QTP",
"memory": "8 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "4",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX, AVX2, AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3a.large",
"name": "t3a.large",
"ram": 8192
},
"t3a.medium": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2120 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.medium",
"instancesku": "F5K4DYDG9JZ549EQ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "4 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "2",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3a.medium",
"name": "t3a.medium",
"ram": 4096
},
"t3a.micro": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 1500 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.micro",
"instancesku": "C92KHZUPSGNAZA8G",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "1 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "0.5",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3a.micro",
"name": "t3a.micro",
"ram": 1024
},
"t3a.nano": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 1500 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.nano",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "0.5 GiB",
"networkPerformance": "Low",
"normalizationSizeFactor": "0.25",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3a.nano",
"name": "t3a.nano",
"ram": 512
},
"t3a.small": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 1500 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.small",
"instancesku": "JE8UDWJG8MPPHAZZ",
"intelAvx2Available": "No",
"intelAvxAvailable": "No",
"intelTurboAvailable": "No",
"memory": "2 GiB",
"networkPerformance": "Low to Moderate",
"normalizationSizeFactor": "1",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX; AVX2; AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "2"
},
"id": "t3a.small",
"name": "t3a.small",
"ram": 2048
},
"t3a.xlarge": {
"bandwidth": None,
"disk": 0,
"extra": {
"clockSpeed": "2.5 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Upto 2120 Mbps",
"ecu": "Variable",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "General purpose",
"instanceType": "t3a.xlarge",
"instancesku": "TBV6C3VKSXKFHHSC",
"memory": "16 GiB",
"networkPerformance": "Moderate",
"normalizationSizeFactor": "8",
"physicalProcessor": "AMD EPYC 7571",
"processorArchitecture": "64-bit",
"processorFeatures": "AVX, AVX2, AMD Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "EBS only",
"vcpu": "4"
},
"id": "t3a.xlarge",
"name": "t3a.xlarge",
"ram": 16384
},
"x1.16xlarge": {
"bandwidth": None,
"disk": 1920,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "174.5",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1.16xlarge",
"instancesku": "PCDN4738CD5856G7",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "976 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "128",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1920 SSD",
"vcpu": "64"
},
"id": "x1.16xlarge",
"name": "x1.16xlarge",
"ram": 999424
},
"x1.32xlarge": {
"bandwidth": None,
"disk": 3840,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "349",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1.32xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "1952 GiB",
"networkPerformance": "High",
"normalizationSizeFactor": "256",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1920 SSD",
"vcpu": "128"
},
"id": "x1.32xlarge",
"name": "x1.32xlarge",
"ram": 1998848
},
"x1e.16xlarge": {
"bandwidth": 10,
"disk": 1920,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "179",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.16xlarge",
"instancesku": "WGPFFCGF4UHBGVDH",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"memory": "1952 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "128",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 1920 SSD",
"vcpu": "64"
},
"id": "x1e.16xlarge",
"name": "x1e.16xlarge",
"ram": 1998848
},
"x1e.2xlarge": {
"bandwidth": 10,
"disk": 240,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1000 Mbps",
"ecu": "23",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.2xlarge",
"instancesku": "UA4QBJUCBNHDQX2B",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"memory": "244 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 240 SSD",
"vcpu": "8"
},
"id": "x1e.2xlarge",
"name": "x1e.2xlarge",
"ram": 249856
},
"x1e.32xlarge": {
"bandwidth": 25,
"disk": 3840,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "340",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.32xlarge",
"instancesku": "A52J7F6ERS7NFNBK",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"memory": "3904 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "256",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 1920 SSD",
"vcpu": "128"
},
"id": "x1e.32xlarge",
"name": "x1e.32xlarge",
"ram": 3997696
},
"x1e.4xlarge": {
"bandwidth": 10,
"disk": 480,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "1750 Mbps",
"ecu": "47",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.4xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"memory": "488 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "32",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 480 SSD",
"vcpu": "16"
},
"id": "x1e.4xlarge",
"name": "x1e.4xlarge",
"ram": 499712
},
"x1e.8xlarge": {
"bandwidth": 10,
"disk": 960,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "3500 Mbps",
"ecu": "91",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.8xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"memory": "976 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "64",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 960 SSD",
"vcpu": "32"
},
"id": "x1e.8xlarge",
"name": "x1e.8xlarge",
"ram": 999424
},
"x1e.xlarge": {
"bandwidth": 10,
"disk": 120,
"extra": {
"clockSpeed": "2.3 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "500 Mbps",
"ecu": "12",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "x1e.xlarge",
"instancesku": "F4FMFRXVW3TMHF72",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "No",
"memory": "122 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 120 SSD",
"vcpu": "4"
},
"id": "x1e.xlarge",
"name": "x1e.xlarge",
"ram": 124928
},
"z1d.12xlarge": {
"bandwidth": 25,
"disk": 1800,
"extra": {
"clockSpeed": "4.0 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "14000 Mbps",
"ecu": "235",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.12xlarge",
"instancesku": "3NPW8NJBUPE5PHWG",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "384 GiB",
"networkPerformance": "25 Gigabit",
"normalizationSizeFactor": "96",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "2 x 900 NVMe SSD",
"vcpu": "48"
},
"id": "z1d.12xlarge",
"name": "z1d.12xlarge",
"ram": 393216
},
"z1d.2xlarge": {
"bandwidth": 10,
"disk": 300,
"extra": {
"clockSpeed": "4.0 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "2333 Mbps",
"ecu": "45",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.2xlarge",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "64 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "16",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 300 NVMe SSD",
"vcpu": "8"
},
"id": "z1d.2xlarge",
"name": "z1d.2xlarge",
"ram": 65536
},
"z1d.3xlarge": {
"bandwidth": 10,
"disk": 450,
"extra": {
"currentGeneration": "Yes",
"ecu": "75",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.3xlarge",
"memory": "96 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "24",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 450 NVMe SSD",
"vcpu": "12"
},
"id": "z1d.3xlarge",
"name": "z1d.3xlarge",
"ram": 98304
},
"z1d.6xlarge": {
"bandwidth": 10,
"disk": 900,
"extra": {
"clockSpeed": "4.0 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "7000 Mbps",
"ecu": "116",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.6xlarge",
"instancesku": "6KCEGJ35VRAHEDVX",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "192 GiB",
"networkPerformance": "10 Gigabit",
"normalizationSizeFactor": "48",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 900 NVMe SSD",
"vcpu": "24"
},
"id": "z1d.6xlarge",
"name": "z1d.6xlarge",
"ram": 196608
},
"z1d.large": {
"bandwidth": 10,
"disk": 75,
"extra": {
"clockSpeed": "4.0 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2333 Mbps",
"ecu": "12",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.large",
"instancesku": "A7FTGGDM8KVFNGQB",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "16 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "4",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 75 NVMe SSD",
"vcpu": "2"
},
"id": "z1d.large",
"name": "z1d.large",
"ram": 16384
},
"z1d.xlarge": {
"bandwidth": 10,
"disk": 150,
"extra": {
"clockSpeed": "4.0 GHz",
"currentGeneration": "Yes",
"dedicatedEbsThroughput": "Up to 2333 Mbps",
"ecu": "23",
"enhancedNetworkingSupported": "Yes",
"instanceFamily": "Memory optimized",
"instanceType": "z1d.xlarge",
"instancesku": "DAVJWHZW3NM7TCJW",
"intelAvx2Available": "Yes",
"intelAvxAvailable": "Yes",
"intelTurboAvailable": "Yes",
"memory": "32 GiB",
"networkPerformance": "Up to 10 Gigabit",
"normalizationSizeFactor": "8",
"physicalProcessor": "Intel Xeon Platinum 8151",
"processorArchitecture": "64-bit",
"processorFeatures": "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
"servicecode": "AmazonEC2",
"servicename": "Amazon Elastic Compute Cloud",
"storage": "1 x 150 NVMe SSD",
"vcpu": "4"
},
"id": "z1d.xlarge",
"name": "z1d.xlarge",
"ram": 32768
}
}
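

# Illustrative helper, not part of the original data module: the mapping that
# closes above associates each EC2 size id with normalized attributes ("ram"
# in MiB, "disk" in GB, "bandwidth" in Gbit/s or None when AWS publishes no
# figure) plus the raw pricing-API fields under "extra", while REGION_DETAILS
# below lists which size ids each region offers.  The sketch takes both
# mappings as parameters because the name of the size dictionary is assigned
# outside this excerpt; treat it as a usage example rather than public API.
def _sizes_for_region(region_details, instance_types, region_id, min_ram_mib=0):
    """Return [(size_id, ram_mib, vcpu), ...] for one region, sorted by RAM."""
    matches = []
    for size_id in region_details[region_id]["instance_types"]:
        attrs = instance_types.get(size_id)
        if attrs is None or attrs["ram"] < min_ram_mib:
            continue
        matches.append((size_id, attrs["ram"], attrs["extra"].get("vcpu")))
    return sorted(matches, key=lambda entry: entry[1])

# Example call (INSTANCE_TYPES is an assumed name for the mapping above):
#   _sizes_for_region(REGION_DETAILS, INSTANCE_TYPES, "eu-west-2", min_ram_mib=8192)
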
REGION_DETAILS = {
"ap-northeast-1": {
"api_name": "ec2_ap_northeast",
"country": "Japan",
"endpoint": "ec2.ap-northeast-1.amazonaws.com",
"id": "ap-northeast-1",
"instance_types": [
"a1.2xlarge",
"a1.4xlarge",
"a1.large",
"a1.medium",
"a1.xlarge",
"c1.medium",
"c1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"cc2.8xlarge",
"cr1.8xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"hs1.8xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m1.large",
"m1.medium",
"m1.small",
"m1.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m2.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"m5dn.12xlarge",
"m5dn.16xlarge",
"m5dn.24xlarge",
"m5dn.2xlarge",
"m5dn.4xlarge",
"m5dn.8xlarge",
"m5dn.large",
"m5dn.xlarge",
"m5n.12xlarge",
"m5n.16xlarge",
"m5n.24xlarge",
"m5n.2xlarge",
"m5n.4xlarge",
"m5n.8xlarge",
"m5n.large",
"m5n.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"p3dn.24xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"r5dn.12xlarge",
"r5dn.16xlarge",
"r5dn.24xlarge",
"r5dn.2xlarge",
"r5dn.4xlarge",
"r5dn.8xlarge",
"r5dn.large",
"r5dn.xlarge",
"r5n.12xlarge",
"r5n.16xlarge",
"r5n.24xlarge",
"r5n.2xlarge",
"r5n.4xlarge",
"r5n.8xlarge",
"r5n.large",
"r5n.xlarge",
"t1.micro",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "2"
},
"ap-northeast-2": {
"api_name": "ec2_ap_northeast",
"country": "South Korea",
"endpoint": "ec2.ap-northeast-2.amazonaws.com",
"id": "ap-northeast-2",
"instance_types": [
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "4"
},
"ap-northeast-3": {
"api_name": "ec2_ap_northeast",
"country": "Japan",
"endpoint": "ec2.ap-northeast-3.amazonaws.com",
"id": "ap-northeast-3",
"instance_types": [
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.18xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.18xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge"
],
"signature_version": "4"
},
"ap-south-1": {
"api_name": "ec2_ap_south_1",
"country": "India",
"endpoint": "ec2.ap-south-1.amazonaws.com",
"id": "ap-south-1",
"instance_types": [
"a1.2xlarge",
"a1.4xlarge",
"a1.large",
"a1.medium",
"a1.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.18xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "4"
},
"ap-southeast-1": {
"api_name": "ec2_ap_southeast",
"country": "Singapore",
"endpoint": "ec2.ap-southeast-1.amazonaws.com",
"id": "ap-southeast-1",
"instance_types": [
"a1.2xlarge",
"a1.4xlarge",
"a1.large",
"a1.medium",
"a1.xlarge",
"c1.medium",
"c1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"hs1.8xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m1.large",
"m1.medium",
"m1.small",
"m1.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m2.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.metal",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"m5dn.12xlarge",
"m5dn.16xlarge",
"m5dn.24xlarge",
"m5dn.2xlarge",
"m5dn.4xlarge",
"m5dn.8xlarge",
"m5dn.large",
"m5dn.xlarge",
"m5n.12xlarge",
"m5n.16xlarge",
"m5n.24xlarge",
"m5n.2xlarge",
"m5n.4xlarge",
"m5n.8xlarge",
"m5n.large",
"m5n.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"r5dn.12xlarge",
"r5dn.16xlarge",
"r5dn.24xlarge",
"r5dn.2xlarge",
"r5dn.4xlarge",
"r5dn.8xlarge",
"r5dn.large",
"r5dn.xlarge",
"r5n.12xlarge",
"r5n.16xlarge",
"r5n.24xlarge",
"r5n.2xlarge",
"r5n.4xlarge",
"r5n.8xlarge",
"r5n.large",
"r5n.xlarge",
"t1.micro",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "2"
},
"ap-southeast-2": {
"api_name": "ec2_ap_southeast_2",
"country": "Australia",
"endpoint": "ec2.ap-southeast-2.amazonaws.com",
"id": "ap-southeast-2",
"instance_types": [
"a1.2xlarge",
"a1.4xlarge",
"a1.large",
"a1.medium",
"a1.xlarge",
"c1.medium",
"c1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"f1.16xlarge",
"f1.2xlarge",
"f1.4xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"hs1.8xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m1.large",
"m1.medium",
"m1.small",
"m1.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m2.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t1.micro",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "2"
},
"ca-central-1": {
"api_name": "ec2_ca_central_1",
"country": "Canada",
"endpoint": "ec2.ca-central-1.amazonaws.com",
"id": "ca-central-1",
"instance_types": [
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge"
],
"signature_version": "4"
},
"cn-north-1": {
"api_name": "ec2_cn_north",
"country": "China",
"endpoint": "ec2.cn-north-1.amazonaws.com.cn",
"id": "cn-north-1",
"instance_types": [],
"signature_version": "4"
},
"cn-northwest-1": {
"api_name": "ec2_cn_northwest",
"country": "China",
"endpoint": "ec2.cn-northwest-1.amazonaws.com.cn",
"id": "cn-northwest-1",
"instance_types": [],
"signature_version": "4"
},
"eu-central-1": {
"api_name": "ec2_eu_central",
"country": "Frankfurt",
"endpoint": "ec2.eu-central-1.amazonaws.com",
"id": "eu-central-1",
"instance_types": [
"a1.2xlarge",
"a1.4xlarge",
"a1.large",
"a1.medium",
"a1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"m5dn.12xlarge",
"m5dn.16xlarge",
"m5dn.24xlarge",
"m5dn.2xlarge",
"m5dn.4xlarge",
"m5dn.8xlarge",
"m5dn.large",
"m5dn.xlarge",
"m5n.12xlarge",
"m5n.16xlarge",
"m5n.24xlarge",
"m5n.2xlarge",
"m5n.4xlarge",
"m5n.8xlarge",
"m5n.large",
"m5n.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"r5dn.12xlarge",
"r5dn.16xlarge",
"r5dn.24xlarge",
"r5dn.2xlarge",
"r5dn.4xlarge",
"r5dn.8xlarge",
"r5dn.large",
"r5dn.xlarge",
"r5n.12xlarge",
"r5n.16xlarge",
"r5n.24xlarge",
"r5n.2xlarge",
"r5n.4xlarge",
"r5n.8xlarge",
"r5n.large",
"r5n.xlarge",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "4"
},
"eu-west-1": {
"api_name": "ec2_eu_west",
"country": "Ireland",
"endpoint": "ec2.eu-west-1.amazonaws.com",
"id": "eu-west-1",
"instance_types": [
"a1.2xlarge",
"a1.4xlarge",
"a1.large",
"a1.medium",
"a1.xlarge",
"c1.medium",
"c1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"cc2.8xlarge",
"cr1.8xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"f1.16xlarge",
"f1.2xlarge",
"f1.4xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"h1.16xlarge",
"h1.2xlarge",
"h1.4xlarge",
"h1.8xlarge",
"hs1.8xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m1.large",
"m1.medium",
"m1.small",
"m1.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m2.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"m5dn.12xlarge",
"m5dn.16xlarge",
"m5dn.24xlarge",
"m5dn.2xlarge",
"m5dn.4xlarge",
"m5dn.8xlarge",
"m5dn.large",
"m5dn.xlarge",
"m5n.12xlarge",
"m5n.16xlarge",
"m5n.24xlarge",
"m5n.2xlarge",
"m5n.4xlarge",
"m5n.8xlarge",
"m5n.large",
"m5n.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"p3dn.24xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"r5dn.12xlarge",
"r5dn.16xlarge",
"r5dn.24xlarge",
"r5dn.2xlarge",
"r5dn.4xlarge",
"r5dn.8xlarge",
"r5dn.large",
"r5dn.xlarge",
"r5n.12xlarge",
"r5n.16xlarge",
"r5n.24xlarge",
"r5n.2xlarge",
"r5n.4xlarge",
"r5n.8xlarge",
"r5n.large",
"r5n.xlarge",
"t1.micro",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "2"
},
"eu-west-2": {
"api_name": "ec2_eu_west_london",
"country": "United Kingdom",
"endpoint": "ec2.eu-west-2.amazonaws.com",
"id": "eu-west-2",
"instance_types": [
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "4"
},
"eu-west-3": {
"api_name": "ec2_eu_west_paris",
"country": "France",
"endpoint": "ec2.eu-west-3.amazonaws.com",
"id": "eu-west-3",
"instance_types": [
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.18xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge"
],
"signature_version": "4"
},
"sa-east-1": {
"api_name": "ec2_sa_east",
"country": "Brazil",
"endpoint": "ec2.sa-east-1.amazonaws.com",
"id": "sa-east-1",
"instance_types": [
"c1.medium",
"c1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m1.large",
"m1.medium",
"m1.small",
"m1.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m2.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t1.micro",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge"
],
"signature_version": "2"
},
"us-east-1": {
"api_name": "ec2_us_east",
"country": "USA",
"endpoint": "ec2.us-east-1.amazonaws.com",
"id": "us-east-1",
"instance_types": [
"a1.2xlarge",
"a1.4xlarge",
"a1.large",
"a1.medium",
"a1.xlarge",
"c1.medium",
"c1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"cc2.8xlarge",
"cr1.8xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"f1.16xlarge",
"f1.2xlarge",
"f1.4xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"h1.16xlarge",
"h1.2xlarge",
"h1.4xlarge",
"h1.8xlarge",
"hs1.8xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"inf1.24xlarge",
"inf1.2xlarge",
"inf1.6xlarge",
"inf1.xlarge",
"m1.large",
"m1.medium",
"m1.small",
"m1.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m2.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"m5dn.12xlarge",
"m5dn.16xlarge",
"m5dn.24xlarge",
"m5dn.2xlarge",
"m5dn.4xlarge",
"m5dn.8xlarge",
"m5dn.large",
"m5dn.xlarge",
"m5n.12xlarge",
"m5n.16xlarge",
"m5n.24xlarge",
"m5n.2xlarge",
"m5n.4xlarge",
"m5n.8xlarge",
"m5n.large",
"m5n.xlarge",
"m6g.12xlarge",
"m6g.16xlarge",
"m6g.2xlarge",
"m6g.4xlarge",
"m6g.8xlarge",
"m6g.large",
"m6g.medium",
"m6g.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"p3dn.24xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"r5dn.12xlarge",
"r5dn.16xlarge",
"r5dn.24xlarge",
"r5dn.2xlarge",
"r5dn.4xlarge",
"r5dn.8xlarge",
"r5dn.large",
"r5dn.xlarge",
"r5n.12xlarge",
"r5n.16xlarge",
"r5n.24xlarge",
"r5n.2xlarge",
"r5n.4xlarge",
"r5n.8xlarge",
"r5n.large",
"r5n.xlarge",
"t1.micro",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "2"
},
"us-east-2": {
"api_name": "ec2_us_east_ohio",
"country": "USA",
"endpoint": "ec2.us-east-2.amazonaws.com",
"id": "us-east-2",
"instance_types": [
"a1.2xlarge",
"a1.4xlarge",
"a1.large",
"a1.medium",
"a1.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"h1.16xlarge",
"h1.2xlarge",
"h1.4xlarge",
"h1.8xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.metal",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"m5dn.12xlarge",
"m5dn.16xlarge",
"m5dn.24xlarge",
"m5dn.2xlarge",
"m5dn.4xlarge",
"m5dn.8xlarge",
"m5dn.large",
"m5dn.xlarge",
"m5n.12xlarge",
"m5n.16xlarge",
"m5n.24xlarge",
"m5n.2xlarge",
"m5n.4xlarge",
"m5n.8xlarge",
"m5n.large",
"m5n.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"r5dn.12xlarge",
"r5dn.16xlarge",
"r5dn.24xlarge",
"r5dn.2xlarge",
"r5dn.4xlarge",
"r5dn.8xlarge",
"r5dn.large",
"r5dn.xlarge",
"r5n.12xlarge",
"r5n.16xlarge",
"r5n.24xlarge",
"r5n.2xlarge",
"r5n.4xlarge",
"r5n.8xlarge",
"r5n.large",
"r5n.xlarge",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "4"
},
"us-gov-west-1": {
"api_name": "ec2_us_govwest",
"country": "US",
"endpoint": "ec2.us-gov-west-1.amazonaws.com",
"id": "us-gov-west-1",
"instance_types": [
"c1.medium",
"c1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"cc2.8xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"f1.16xlarge",
"f1.2xlarge",
"f1.4xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"hs1.8xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m1.large",
"m1.medium",
"m1.small",
"m1.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m2.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"p3dn.24xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t1.micro",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge"
],
"signature_version": "2"
},
"us-west-1": {
"api_name": "ec2_us_west",
"country": "USA",
"endpoint": "ec2.us-west-1.amazonaws.com",
"id": "us-west-1",
"instance_types": [
"c1.medium",
"c1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"f1.16xlarge",
"f1.2xlarge",
"f1.4xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"m1.large",
"m1.medium",
"m1.small",
"m1.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m2.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"t1.micro",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "2"
},
"us-west-2": {
"api_name": "ec2_us_west_oregon",
"country": "US",
"endpoint": "ec2.us-west-2.amazonaws.com",
"id": "us-west-2",
"instance_types": [
"a1.2xlarge",
"a1.4xlarge",
"a1.large",
"a1.medium",
"a1.xlarge",
"c1.medium",
"c1.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5d.12xlarge",
"c5d.18xlarge",
"c5d.24xlarge",
"c5d.2xlarge",
"c5d.4xlarge",
"c5d.9xlarge",
"c5d.large",
"c5d.xlarge",
"c5n.18xlarge",
"c5n.2xlarge",
"c5n.4xlarge",
"c5n.9xlarge",
"c5n.large",
"c5n.xlarge",
"cc2.8xlarge",
"cr1.8xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"d2.xlarge",
"f1.16xlarge",
"f1.2xlarge",
"f1.4xlarge",
"g2.2xlarge",
"g2.8xlarge",
"g3.16xlarge",
"g3.4xlarge",
"g3.8xlarge",
"g3s.xlarge",
"g4dn.12xlarge",
"g4dn.16xlarge",
"g4dn.2xlarge",
"g4dn.4xlarge",
"g4dn.8xlarge",
"g4dn.xlarge",
"h1.16xlarge",
"h1.2xlarge",
"h1.4xlarge",
"h1.8xlarge",
"hs1.8xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"i2.xlarge",
"i3.16xlarge",
"i3.2xlarge",
"i3.4xlarge",
"i3.8xlarge",
"i3.large",
"i3.xlarge",
"i3en.12xlarge",
"i3en.24xlarge",
"i3en.2xlarge",
"i3en.3xlarge",
"i3en.6xlarge",
"i3en.large",
"i3en.xlarge",
"inf1.24xlarge",
"inf1.2xlarge",
"inf1.6xlarge",
"inf1.xlarge",
"m1.large",
"m1.medium",
"m1.small",
"m1.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m2.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.16xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.metal",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"m5ad.12xlarge",
"m5ad.16xlarge",
"m5ad.24xlarge",
"m5ad.2xlarge",
"m5ad.4xlarge",
"m5ad.8xlarge",
"m5ad.large",
"m5ad.xlarge",
"m5d.12xlarge",
"m5d.16xlarge",
"m5d.24xlarge",
"m5d.2xlarge",
"m5d.4xlarge",
"m5d.8xlarge",
"m5d.large",
"m5d.xlarge",
"m5dn.12xlarge",
"m5dn.16xlarge",
"m5dn.24xlarge",
"m5dn.2xlarge",
"m5dn.4xlarge",
"m5dn.8xlarge",
"m5dn.large",
"m5dn.xlarge",
"m5n.12xlarge",
"m5n.16xlarge",
"m5n.24xlarge",
"m5n.2xlarge",
"m5n.4xlarge",
"m5n.8xlarge",
"m5n.large",
"m5n.xlarge",
"p2.16xlarge",
"p2.8xlarge",
"p2.xlarge",
"p3.16xlarge",
"p3.2xlarge",
"p3.8xlarge",
"p3dn.24xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"r5ad.12xlarge",
"r5ad.16xlarge",
"r5ad.24xlarge",
"r5ad.2xlarge",
"r5ad.4xlarge",
"r5ad.8xlarge",
"r5ad.large",
"r5ad.xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.large",
"r5d.xlarge",
"r5dn.12xlarge",
"r5dn.16xlarge",
"r5dn.24xlarge",
"r5dn.2xlarge",
"r5dn.4xlarge",
"r5dn.8xlarge",
"r5dn.large",
"r5dn.xlarge",
"r5n.12xlarge",
"r5n.16xlarge",
"r5n.24xlarge",
"r5n.2xlarge",
"r5n.4xlarge",
"r5n.8xlarge",
"r5n.large",
"r5n.xlarge",
"t1.micro",
"t2.2xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.nano",
"t2.small",
"t2.xlarge",
"t3.2xlarge",
"t3.large",
"t3.medium",
"t3.micro",
"t3.nano",
"t3.small",
"t3.xlarge",
"t3a.2xlarge",
"t3a.large",
"t3a.medium",
"t3a.micro",
"t3a.nano",
"t3a.small",
"t3a.xlarge",
"x1.16xlarge",
"x1.32xlarge",
"x1e.16xlarge",
"x1e.2xlarge",
"x1e.32xlarge",
"x1e.4xlarge",
"x1e.8xlarge",
"x1e.xlarge",
"z1d.12xlarge",
"z1d.2xlarge",
"z1d.3xlarge",
"z1d.6xlarge",
"z1d.large",
"z1d.xlarge"
],
"signature_version": "2"
}
}
| apache-2.0 | -6,092,564,259,387,442,000 | 32.480187 | 111 | 0.459797 | false |
cXhristian/django-wiki | src/wiki/plugins/attachments/urls.py | 1 | 1332 | from __future__ import unicode_literals
from django.conf.urls import url
from wiki.plugins.attachments import views
urlpatterns = [
url(r'^$',
views.AttachmentView.as_view(),
name='attachments_index'),
url(r'^search/$',
views.AttachmentSearchView.as_view(),
name='attachments_search'),
url(r'^add/(?P<attachment_id>[0-9]+)/$',
views.AttachmentAddView.as_view(),
name='attachments_add'),
url(r'^replace/(?P<attachment_id>[0-9]+)/$',
views.AttachmentReplaceView.as_view(),
name='attachments_replace'),
url(r'^history/(?P<attachment_id>[0-9]+)/$',
views.AttachmentHistoryView.as_view(),
name='attachments_history'),
url(r'^download/(?P<attachment_id>[0-9]+)/$',
views.AttachmentDownloadView.as_view(),
name='attachments_download'),
url(r'^delete/(?P<attachment_id>[0-9]+)/$',
views.AttachmentDeleteView.as_view(),
name='attachments_delete'),
url(r'^download/(?P<attachment_id>[0-9]+)/revision/(?P<revision_id>[0-9]+)/$',
views.AttachmentDownloadView.as_view(),
name='attachments_download'),
url(r'^change/(?P<attachment_id>[0-9]+)/revision/(?P<revision_id>[0-9]+)/$',
views.AttachmentChangeRevisionView.as_view(),
name='attachments_revision_change'),
]
| gpl-3.0 | 7,799,358,734,287,169,000 | 38.176471 | 82 | 0.617868 | false |
elbeardmorez/quodlibet | quodlibet/quodlibet/plugins/events.py | 1 | 5845 | # -*- coding: utf-8 -*-
# Copyright 2005 Michael Urman, Joe Wreschnig
# 2014, 2017 Nick Boultbee
# 2017 Pete Beardmore
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import GObject
from quodlibet.util.dprint import print_e
from quodlibet.plugins import PluginHandler
from quodlibet.library.librarians import SongLibrarian
from quodlibet.util.songwrapper import SongWrapper, ListWrapper
from quodlibet.util.songwrapper import check_wrapper_changed
from quodlibet.util import connect_obj
from quodlibet.compat import listvalues
from quodlibet.errorreport import errorhook
class EventPlugin(object):
"""Plugins that run in the background and receive events.
Event plugins, unlike other plugins, are instantiated on startup and
the same instance is used even when the plugin is enabled or disabled.
Methods `plugin_on_*` can be overridden to provide behaviour hooks
"""
def plugin_on_song_started(self, song):
pass
def plugin_on_song_ended(self, song, stopped):
pass
def plugin_on_added(self, songs):
pass
def plugin_on_changed(self, songs):
pass
def plugin_on_removed(self, songs):
pass
def plugin_on_paused(self):
pass
def plugin_on_unpaused(self):
pass
def plugin_on_seek(self, song, msec):
pass
def plugin_on_error(self, song, error):
pass
    def plugin_on_songs_selected(self, songs):
        """Called when the selection in the main songlist changes"""
pass
def plugin_on_plugin_toggled(self, plugin, enabled):
pass
PLUGIN_INSTANCE = True
def enabled(self):
"""Called when the plugin is enabled."""
pass
def disabled(self):
"""Called when the plugin is disabled."""
pass
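# Illustrative sketch (not part of the original module): a minimal
# EventPlugin subclass showing how the plugin_on_* hooks are overridden.
# The PLUGIN_ID / PLUGIN_NAME strings below are hypothetical examples.
class _ExampleLoggerPlugin(EventPlugin):
    PLUGIN_ID = "example_logger"      # hypothetical identifier
    PLUGIN_NAME = "Example Logger"    # hypothetical display name

    def plugin_on_song_started(self, song):
        # song is a SongWrapper (or None) handed in by EventPluginHandler
        print_e("song started: %r" % (song,))

    def plugin_on_paused(self):
        print_e("playback paused")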
def list_signal_names(type_):
"""List of supported signal names for a GType, instance or class"""
type_ = getattr(type_, "__gtype__", type_)
names = []
if not type_.is_instantiatable() and not type_.is_interface():
return names
names.extend(GObject.signal_list_names(type_))
if type_.parent:
names.extend(list_signal_names(type_.parent))
for iface in type_.interfaces:
names.extend(list_signal_names(iface))
return names
def _map_signals(obj, prefix="plugin_on_", blacklist=None):
sigs = list_signal_names(obj)
if blacklist is None:
blacklist = []
sigs = [s for s in sigs if s not in blacklist]
sigs = [(s, prefix + s.replace('-', '_')) for s in sigs]
return sigs
class EventPluginHandler(PluginHandler):
def __init__(self, librarian=None, player=None,
songlist=None, pluginmanager=None):
if librarian:
sigs = _map_signals(librarian, blacklist=("notify",))
for event, handle in sigs:
def handler(librarian, *args):
self.__invoke(librarian, args[-1], *args[:-1])
librarian.connect(event, handler, event)
if librarian and player:
sigs = _map_signals(player, blacklist=("notify",))
for event, handle in sigs:
def cb_handler(librarian, *args):
self.__invoke(librarian, args[-1], *args[:-1])
connect_obj(player, event, cb_handler, librarian, event)
if songlist:
def __selection_changed_cb(songlist, selection):
songs = songlist.get_selected_songs()
self.__invoke(self.librarian, "songs_selected", songs)
songlist.connect("selection-changed", __selection_changed_cb)
if pluginmanager:
def __plugin_toggled_cb(pluginmanager, plugin, enabled):
self.__invoke(None, "plugin-toggled", plugin, enabled)
pluginmanager.connect("plugin-toggled", __plugin_toggled_cb)
self.librarian = librarian
self.__plugins = {}
self.__sidebars = {}
def __invoke(self, target, event, *args):
args = list(args)
# prep args
if isinstance(target, SongLibrarian):
librarian = target
if args and args[0]:
if isinstance(args[0], dict):
args[0] = SongWrapper(args[0])
elif isinstance(args[0], (set, list)):
args[0] = ListWrapper(args[0])
# look for overrides in handled plugins
for plugin in listvalues(self.__plugins):
method_name = 'plugin_on_' + event.replace('-', '_')
handler = getattr(plugin, method_name, None)
def overridden(obj, name):
return name in type(obj).__dict__
# call override
if overridden(plugin, method_name):
try:
handler(*args)
except Exception:
print_e("Error during %s on %s" %
(method_name, type(plugin)))
errorhook()
if isinstance(target, SongLibrarian):
if event not in ["removed", "changed"] and args:
from quodlibet import app
songs = args[0]
if not isinstance(songs, (set, list)):
songs = [songs]
songs = filter(None, songs)
check_wrapper_changed(librarian, app.window, songs)
def plugin_handle(self, plugin):
return issubclass(plugin.cls, EventPlugin)
def plugin_enable(self, plugin):
self.__plugins[plugin.cls] = plugin.get_instance()
def plugin_disable(self, plugin):
self.__plugins.pop(plugin.cls)
| gpl-2.0 | 5,648,647,886,682,548,000 | 31.115385 | 74 | 0.599658 | false |
echinopsii/net.echinopsii.ariane.community.core.injector | messaging/src/test/resources/InjectorComponentActionTest.py | 1 | 4228 | import uuid
import pika
import json
from pprint import pprint
__author__ = 'mffrench'
class Requestor(object):
def __init__(self, connection_, requestQ_):
self.connection = connection_
self.channel = connection.channel()
self.requestQ = requestQ_
self.channel.queue_declare(queue=requestQ_)
self.result = self.channel.queue_declare(exclusive=True)
self.callback_queue = self.result.method.queue
self.response = None
self.corr_id = None
def start(self):
self.channel.basic_consume(self.on_response, no_ack=True,
queue=self.callback_queue)
def stop(self):
self.channel.close()
self.connection.close()
def on_response(self, ch, method_frame, props, body):
if self.corr_id == props.correlation_id:
self.response = {'props': props, 'body': body }
def call(self, p=None, n=None):
self.response = None
self.corr_id = str(uuid.uuid4())
properties = pika.BasicProperties(content_type=None, content_encoding=None,
headers=p, delivery_mode=None,
priority=None, correlation_id=self.corr_id,
reply_to=self.callback_queue, expiration=None,
message_id=None, timestamp=None,
type=None, user_id=None,
app_id=None, cluster_id=None)
if n is not None:
self.channel.basic_publish(exchange='',
routing_key=self.requestQ,
properties=properties,
body=str(n))
else:
self.channel.basic_publish(exchange='',
routing_key=self.requestQ,
properties=properties,
body='')
while self.response is None:
self.connection.process_data_events()
return self.response
class Service(object):
def __init__(self, connection_, serviceQ_, cb_):
self.connection = connection_
self.channel = connection.channel()
self.channel.queue_declare(queue=serviceQ_)
self.serviceQ = serviceQ_
self.cb = cb_
def start(self):
self.channel.basic_consume(self.on_request, self.serviceQ)
self.channel.start_consuming()
def stop(self):
self.channel.stop_consuming()
        self.channel.close()
        self.connection.close()
def on_request(self, ch, method_frame, props, body):
try:
self.cb(ch, props, body)
except Exception as e:
print("### Packaging failed: {0}".format(e))
ch.basic_ack(delivery_tag=method_frame.delivery_tag)
client_properties = {
'product': 'Ariane',
'information': 'Ariane - Docker Injector',
'ariane.pgurl': 'ssh://localhost',
'ariane.osi': 'localhost',
'ariane.otm': 'DOCKops',
'ariane.app': 'Ariane',
'ariane.cmp': 'echinopsii'
}
credentials = pika.PlainCredentials('ariane', 'password')
parameters = pika.ConnectionParameters("localhost", 5672, '/ariane',
credentials=credentials, client_props=client_properties)
connection = pika.BlockingConnection(parameters)
requestorComp = Requestor(connection, 'remote.injector.comp')
requestorComp.start()
def localhost_component_service(channel, props, body):
cache_mgr_name = 'ARIANE_PLUGIN_DOCKER_COMPONENTS_CACHE_MGR'
registry_name = 'Ariane Docker plugin components registry'
registry_cache_id = 'ariane.community.plugin.docker.components.cache'
registry_cache_name = 'Ariane Docker plugin components cache'
operation = props.headers['OPERATION']
print(operation)
if operation == "REFRESH":
pass
else:
print("Unsupported operation " + str(operation))
localhost_comp_service = Service(connection, 'ariane.community.plugin.docker.components.cache.localhost', localhost_component_service)
localhost_comp_service.start() | agpl-3.0 | 9,007,169,291,546,052,000 | 35.773913 | 134 | 0.581362 | false |
ariestiyansyah/atlas | users/__init__.py | 1 | 2699 | # coding=utf-8
"""This is the main package for the application.
:copyright: (c) 2013 by Tim Sutton
:license: GPLv3, see LICENSE for more details.
"""
import os
import logging
from flask import Flask
from flask_mail import Mail
from flask.ext.appconfig import AppConfig
from users.database import db, migrate
APP = Flask(__name__)
# Load configuration from any possible means.
AppConfig(APP, default_settings="users.default_config")
def add_handler_once(logger, handler):
"""A helper to add a handler to a logger, ensuring there are no duplicates.
:param logger: The logger instance.
:type logger: logging.logger
    :param handler: Handler instance to be added. It will not be
added if an instance of that Handler subclass already exists.
:type handler: logging.Handler
:returns: True if the logging handler was added
:rtype bool: bool
"""
class_name = handler.__class__.__name__
for logger_handler in logger.handlers:
if logger_handler.__class__.__name__ == class_name:
return False
logger.addHandler(handler)
return True
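# Illustrative usage note (not part of the original module): because the
# check is by handler class, calling setup_logger() twice will not attach
# duplicate handlers, e.g.
#   logger = logging.getLogger('user_map')
#   add_handler_once(logger, logging.StreamHandler())  # True, added
#   add_handler_once(logger, logging.StreamHandler())  # False, skipped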
def setup_logger():
"""Set up our logger with sentry support.
"""
logger = logging.getLogger('user_map')
logger.setLevel(logging.DEBUG)
handler_level = logging.DEBUG
# create formatter that will be added to the handlers
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log_dir = APP.config['LOG_DIR']
# so e.g. jenkins can override log dir.
if 'USER_MAP_LOGFILE' in os.environ:
file_name = os.environ['USER_MAP_LOGFILE']
else:
file_name = os.path.join(log_dir, 'user-map.log')
file_handler = logging.FileHandler(file_name)
file_handler.setLevel(handler_level)
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.ERROR)
#Set formatters
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
# add the handlers to the logger
add_handler_once(logger, file_handler)
add_handler_once(logger, console_handler)
setup_logger()
LOGGER = logging.getLogger('user_map')
# Mailer
mail = Mail(APP)
# backward-compat
APP.config['DATABASE'] = APP.config['SQLITE_DB_PATH']
db.init_app(APP)
migration_dir = os.path.join(os.path.dirname(__file__), "migrations")
migrate.init_app(APP, db, directory=migration_dir)
# Don't import actual view methods themselves - see:
# http://flask.pocoo.org/docs/patterns/packages/#larger-applications
# Also views must be imported AFTER app is created above.
# noinspection PyUnresolvedReferences
import users.views # noqa
| gpl-2.0 | -3,178,633,222,398,445,600 | 28.336957 | 79 | 0.702482 | false |
pgroudas/pants | src/python/pants/base/payload_field.py | 1 | 9112 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from abc import abstractmethod
from hashlib import sha1
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.validation import assert_list
from pants.util.meta import AbstractClass
def stable_json_dumps(obj):
return json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True)
def stable_json_sha1(obj):
return sha1(stable_json_dumps(obj)).hexdigest()
def combine_hashes(hashes):
"""A simple helper function to combine other hashes. Sorts the hashes before rolling them in."""
hasher = sha1()
for h in sorted(hashes):
hasher.update(h)
return hasher.hexdigest()
class PayloadField(AbstractClass):
"""An immutable, hashable structure to be mixed into Payload instances."""
_fingerprint_memo = None
def fingerprint(self):
"""A memoized sha1 hexdigest hashing the contents of this PayloadField
The fingerprint returns either a bytestring or None. If the return is None, consumers of the
fingerprint may choose to elide this PayloadField from their combined hash computation.
"""
if self._fingerprint_memo is None:
self._fingerprint_memo = self._compute_fingerprint()
return self._fingerprint_memo
@abstractmethod
def _compute_fingerprint(self):
"""This method will be called and the result memoized for ``PayloadField.fingerprint``."""
pass
@property
def value(self):
return self
class FingerprintedMixin(object):
"""Mixin this class to make your class suitable for passing to FingerprintedField."""
def fingerprint(self):
"""Override this method to implement a fingerprint for your class.
:returns: a sha1 hexdigest hashing the contents of this structure."""
raise NotImplementedError()
class FingerprintedField(PayloadField):
"""Use this field to fingerprint any class that mixes in FingerprintedMixin.
The caller must ensure that the class properly implements fingerprint()
to hash the contents of the object.
"""
def __init__(self, value):
self._value = value
def _compute_fingerprint(self):
return self._value.fingerprint()
@property
def value(self):
return self._value
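# Illustrative sketch (not part of the original module): how
# FingerprintedMixin and FingerprintedField fit together.  A value object
# implements fingerprint(), and the field simply delegates to it; the
# _ExampleSettings class and its fields are hypothetical.
class _ExampleSettings(FingerprintedMixin):
  def __init__(self, platform, version):
    self.platform = platform
    self.version = version

  def fingerprint(self):
    # stable_json_sha1 is defined at the top of this module.
    return stable_json_sha1((self.platform, self.version))

# field = FingerprintedField(_ExampleSettings('jvm', '1.8'))
# field.fingerprint() == _ExampleSettings('jvm', '1.8').fingerprint()  # True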
class SourcesField(PayloadField):
"""A PayloadField encapsulating specified sources."""
def __init__(self, sources_rel_path, sources, ref_address=None, filespec=None):
"""
:param sources_rel_path: path that sources parameter may be relative to
:param sources: list of strings representing relative file paths
:param ref_address: optional address spec of target that provides these sources
:param filespec: glob and exclude data that generated this set of sources
"""
self._rel_path = sources_rel_path
self._source_paths = assert_list(sources)
self._ref_address = ref_address
self._filespec = filespec
@property
def filespec(self):
return self._filespec
@property
def rel_path(self):
return self._rel_path
@property
def source_paths(self):
return self._source_paths
@property
  def address(self):
    """Returns the address this sources field refers to (used by some derived classes)"""
return self._ref_address
@property
def num_chunking_units(self):
"""For tasks that require chunking, this is the number of chunk units this field represents.
By default, this is just the number of sources. Other heuristics might consider the number
of bytes or lines in the combined source files.
"""
if self._source_paths:
return len(self._source_paths)
return 1
def has_sources(self, extension=None):
if not self._source_paths:
return False
return any(source.endswith(extension) for source in self._source_paths)
def relative_to_buildroot(self):
"""All sources joined with ``self.rel_path``."""
return [os.path.join(self.rel_path, source) for source in self.source_paths]
def _compute_fingerprint(self):
hasher = sha1()
hasher.update(self._rel_path)
for source in sorted(self.relative_to_buildroot()):
hasher.update(source)
with open(os.path.join(get_buildroot(), source), 'rb') as f:
hasher.update(f.read())
return hasher.hexdigest()
class DeferredSourcesField(SourcesField):
""" A SourcesField that isn't populated immediately when the graph is constructed.
You must subclass this and provide a fingerprint implementation. Requires a task
to call populate() to provide its contents later during processing. For example,
if sources are in an archive, you might use the fingerprint of the archive. If they
are from an external artifact, you might take a fingerprint of the name and version of
the artifact.
"""
class AlreadyPopulatedError(Exception):
"""Raised when a DeferredSourcesField has already been populated."""
pass
class NotPopulatedError(Exception):
""" Raised when the PayloadField has not been populated yet."""
def __init__(self):
super(Exception, self).__init__(
"Field requires a call to populate() before this method can be called.")
def __init__(self, ref_address):
self._populated = False
super(DeferredSourcesField, self).__init__(sources_rel_path=None, sources=[],
ref_address=ref_address)
def populate(self, sources, rel_path=None):
"""Call this method to set the list of files represented by the target.
Intended to be invoked by the DeferredSourcesMapper task.
:param list sources: strings representing absolute paths of files to be included in the source set
:param string rel_path: common prefix for files.
"""
if self._populated:
raise self.AlreadyPopulatedError("Called with rel_path={rel_path} sources={sources}"
.format(rel_path=rel_path, sources=sources))
self._rel_path = rel_path
self._source_paths = assert_list(sources)
self._populated = True
@property
def source_paths(self):
if not self._populated:
raise self.NotPopulatedError()
return self._source_paths
def _compute_fingerprint(self):
"""A subclass must provide an implementation of _compute_fingerprint that can return a valid
fingerprint even if the sources aren't unpacked yet.
"""
if not self._populated:
raise self.NotPopulatedError()
return super(DeferredSourcesField, self)._compute_fingerprint()
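# Illustrative sketch (not part of the original module): a DeferredSourcesField
# subclass whose fingerprint is derived from the identity of an archive rather
# than from the not-yet-unpacked files.  The archive_sha parameter and the
# populate() call shown in the comments are hypothetical examples.
class _ArchiveSourcesField(DeferredSourcesField):
  def __init__(self, ref_address, archive_sha):
    super(_ArchiveSourcesField, self).__init__(ref_address)
    self._archive_sha = archive_sha

  def _compute_fingerprint(self):
    # Safe to call before populate(): only the archive identity is hashed.
    return stable_json_sha1(self._archive_sha)

# field = _ArchiveSourcesField(ref_address=some_address, archive_sha='ab12...')
# ...later, a task unpacks the archive and provides the real file list:
# field.populate(['org/example/Foo.java'], rel_path='.pants.d/unpack/example')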
class PythonRequirementsField(frozenset, PayloadField):
"""A frozenset subclass that mixes in PayloadField.
Must be initialized with an iterable of PythonRequirement instances.
"""
def _compute_fingerprint(self):
def fingerprint_iter():
for req in self:
# TODO(pl): See PythonRequirement note about version_filter
hash_items = (
repr(req._requirement),
req._repository,
req._name,
req._use_2to3,
req.compatibility,
)
yield stable_json_sha1(hash_items)
return combine_hashes(fingerprint_iter())
def hash_bundle(bundle):
hasher = sha1()
hasher.update(bundle._rel_path)
for abs_path in sorted(bundle.filemap.keys()):
buildroot_relative_path = os.path.relpath(abs_path, get_buildroot())
hasher.update(buildroot_relative_path)
hasher.update(bundle.filemap[abs_path])
with open(abs_path, 'rb') as f:
hasher.update(f.read())
return hasher.hexdigest()
class BundleField(tuple, PayloadField):
"""A tuple subclass that mixes in PayloadField.
Must be initialized with an iterable of Bundle instances.
"""
def _compute_fingerprint(self):
return combine_hashes(map(hash_bundle, self))
class ExcludesField(OrderedSet, PayloadField):
"""An OrderedSet subclass that mixes in PayloadField.
Must be initialized with an iterable of Excludes instances.
"""
def _compute_fingerprint(self):
return stable_json_sha1(tuple(repr(exclude) for exclude in self))
class ConfigurationsField(OrderedSet, PayloadField):
"""An OrderedSet subclass that mixes in PayloadField.
Must be initialized with an iterable of strings.
"""
def _compute_fingerprint(self):
return combine_hashes(sha1(s).hexdigest() for s in self)
class JarsField(tuple, PayloadField):
"""A tuple subclass that mixes in PayloadField.
Must be initialized with an iterable of JarDependency instances.
"""
def _compute_fingerprint(self):
return stable_json_sha1(tuple(jar.cache_key() for jar in self))
class PrimitiveField(PayloadField):
"""A general field for primitive types.
As long as the contents are JSON representable, their hash can be stably inferred.
"""
def __init__(self, underlying=None):
self._underlying = underlying
@property
def value(self):
return self._underlying
def _compute_fingerprint(self):
return stable_json_sha1(self._underlying)
| apache-2.0 | -8,005,049,777,657,307,000 | 31.312057 | 102 | 0.712028 | false |
nmoutschen/linux-utils | debian/checkup.py | 1 | 2457 | #!/usr/bin/env python3
"""
This script checks the general state of the system (updates required, CPU
usage, etc.) and prints a summary that can be used in an MOTD file.
You can use this script as part of a cron job to update the MOTD file in order
to display relevant system information to sysadmins upon login.
"""
import apt
import psutil
import socket
def get_pkg_count():
"""
Returns the number of packages that needs to be updated
"""
cache = apt.Cache()
cache.update()
cache.open()
cache.upgrade()
return len(cache.get_changes())
def get_cpu_usage():
"""
Returns the CPU usage as a percentage
"""
return psutil.cpu_percent(interval=1)
def get_mem_usage():
"""
Returns the amount of used RAM as a percentage of total available RAM
"""
return psutil.virtual_memory().percent
def get_disk_usage():
"""
    Returns the overall disk usage across all mounted partitions, as a
    percentage of total space
"""
total = 0
used = 0
for part in psutil.disk_partitions():
part_usage = psutil.disk_usage(part.mountpoint)
total += part_usage.total
used += part_usage.used
return used/total*100
def get_open_servs():
"""
Returns a list of open services currently listening
"""
def get_service(conn):
if conn.type == socket.SOCK_STREAM:
type_ = 'tcp'
else:
type_ = 'udp'
return socket.getservbyport(conn.laddr[1], type_)
services = [get_service(conn) for conn in psutil.net_connections()
if conn.status == psutil.CONN_LISTEN]
services = list(set(services))
services.sort()
return services
if __name__ == "__main__":
pkg_count = get_pkg_count()
cpu_usage = get_cpu_usage()
mem_usage = get_mem_usage()
disk_usage = get_disk_usage()
open_servs = get_open_servs()
if pkg_count == 1:
print("\033[91mThere is currently 1 update available.\033[0m\n")
elif pkg_count > 1:
print("\033[91mThere are currently {} updates available.\033[0m\n".format(pkg_count))
print("\033[94mCPU Usage\033[0m: {:>5.1f}%".format(cpu_usage))
print("\033[94mMemory Usage\033[0m: {:>5.1f}%".format(mem_usage))
print("\033[94mDisk Usage\033[0m: {:>5.1f}%".format(disk_usage))
print()
print("\033[94mOpen Services\033[0m: {}".format(", ".join(open_servs)))
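# Illustrative usage note (not part of the original script): the module
# docstring suggests running this from cron to refresh the MOTD.  A
# hypothetical root crontab entry (the install path is an assumption):
#   */10 * * * * /usr/local/bin/checkup.py > /etc/motd 2>/dev/null
# Note that apt's cache.update() needs root privileges, so the job has to
# run as a privileged user.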
| mit | -1,833,325,935,985,679,600 | 23.57 | 93 | 0.625967 | false |
fullstory-morgue/liveusb-creator | liveusb/resources_rc.py | 1 | 82020 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Di Jun 24 12:09:14 2008
# by: The Resource Compiler for PyQt (Qt v4.4.0)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x03\x90\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\
\x67\x9b\xee\x3c\x1a\x00\x00\x03\x22\x49\x44\x41\x54\x38\x8d\x6d\
\x52\x4d\x6c\x54\x65\x14\x3d\xf7\x7b\xef\x95\xf9\xad\x33\x6d\x07\
\x5b\xa8\xb6\x0a\x86\x12\xa6\xa1\xa2\x33\x8d\x7f\xd1\x98\x46\x37\
\xc4\x60\x58\xa8\xbc\x44\x8c\x90\x81\xf0\xb3\x61\x07\x1b\x0c\x4d\
\x0c\x11\x74\x61\xa2\xe0\x03\xa3\x31\x4c\x25\x98\x34\x01\x37\xfc\
\x04\x21\x92\xd4\xc2\xab\x71\xd1\xd0\x92\xd4\xd2\xc6\x01\x46\x3b\
\xb4\x1d\xe8\xcc\xb4\xf3\xde\xfb\xbe\xeb\xa2\xf3\x92\x59\x70\x92\
\x9b\xdc\xc5\x3d\x27\x27\xe7\x5c\x62\x66\xf8\x48\x65\xb2\xcf\x34\
\xe8\xda\x11\x10\xde\xf4\xa4\x6a\xd5\x84\x98\x27\xc2\xb8\xe3\xca\
\x13\x00\x2e\xd8\x96\xe9\xd6\xdd\x1e\x04\x70\x82\x7c\x81\x74\x26\
\xbb\x45\xd3\xc4\x40\x4f\xd7\x2a\x7d\x5d\x67\xb3\xd1\xd6\x14\x01\
\x98\x51\x28\x56\x30\x32\x76\xbf\x7c\x67\xfa\xa1\x27\x15\xef\xb6\
\x2d\xf3\x6c\x7a\x57\x36\xc3\x8c\xef\x00\xc4\x88\x99\x91\xca\x64\
\xd7\x1b\xba\x18\xd9\xda\xd7\x1d\x8a\xc7\x82\xd0\x89\xd0\x1c\x0d\
\x20\x1c\xd0\x11\x0d\x19\x08\x1a\x02\x33\xc5\x45\x1c\xcf\x0e\x57\
\x0a\xc5\x8a\x1d\x0d\x35\xf4\x3e\x2e\x57\x0d\x66\x34\x11\x33\xe3\
\xd5\x3d\x67\x4f\xf5\xac\x6b\xfb\x34\x18\x30\xc4\xad\xd1\x1c\xaa\
\xae\x07\x43\xd7\xe4\x9a\xd5\xb1\xc5\xbe\xf4\xf3\xe1\xde\xf5\xad\
\xa4\x09\x82\x10\x84\x5f\xae\x4d\xa8\xb7\x5e\x6c\x17\x07\xbe\xbe\
\xea\x38\xae\x6c\xd1\x01\x80\x99\x3f\xbc\x33\x5d\x10\xae\xa7\x58\
\x31\x2f\x32\xe3\x33\xc7\x95\x67\xc6\xa7\x67\x93\x13\xf7\xe6\x8f\
\x5e\xef\x68\xee\xda\xb7\x75\x53\x28\x11\x0b\xe1\xe3\x77\xbb\x44\
\x2d\x06\x02\xa0\x04\x00\x48\xa9\xc2\x55\xc7\xf3\x92\x2f\x3c\xfb\
\x83\x94\xea\x11\x80\x41\xdb\x32\xf3\xb6\x65\x5e\xf9\xe3\xdb\x8f\
\x5e\x1a\x9d\x2c\xec\xdd\x7d\xec\x12\xee\x3f\x2c\x61\x7a\xa6\x0c\
\xa9\x18\x00\x03\x00\x0b\x2c\xaf\xaa\xe9\xa9\x48\x7f\x63\x24\xf0\
\x55\x67\xfb\xca\x6e\x00\x53\x75\x69\xeb\xba\x26\x76\x6e\x7b\x27\
\xe9\x76\x3e\x1d\x45\xa2\x71\x05\x34\x41\xbe\x03\xd6\x6b\x77\xf1\
\x5f\x3f\xdf\xbc\x80\x27\xe3\xb4\x27\xd5\x6b\x17\x87\x27\xab\x57\
\xed\xa9\x25\xdf\xbc\xe3\xaa\x00\x00\xf6\x5b\xa8\x02\x30\x6a\x04\
\xae\x9b\xdf\x00\xec\x04\xe0\x3e\x41\x58\xb7\x2d\xf3\x9e\x2f\xe0\
\x9d\x3f\xfa\xbe\x06\x10\x98\x19\x17\xed\x7f\xd4\xa9\xf3\x7f\xe5\
\xa4\x54\x3d\xb6\x65\x16\xeb\x59\x87\xb3\x63\xab\x4a\xe5\xa5\x8e\
\xdf\x47\xc6\x6f\x00\x88\xe9\x00\x40\x04\x10\x11\xee\xfe\x57\x86\
\x21\x18\x27\x07\xff\x24\x00\x5f\xd4\x93\x53\x99\x6c\x98\x88\xee\
\xf6\x6e\x5c\x7b\x26\x3f\x33\x97\x02\xa0\x01\xa8\x0a\x00\x10\x44\
\x04\x00\x1d\x89\x10\x56\xb7\x84\xf1\xe5\xfe\xb7\xa9\xad\x25\x72\
\xec\x95\x3d\x3f\xdf\x4a\x65\xb2\xe9\x9a\xc6\xf6\x15\x86\xae\xe7\
\xf2\xb3\x07\xf2\x85\xe2\xeb\x9a\x46\xae\x6d\x99\xae\x58\x76\x40\
\x78\x5c\x71\x71\x61\x68\x8a\x3d\x8f\xd1\xbe\x32\x8a\xc3\x3b\xde\
\x08\x7d\xd0\x97\x7c\xb9\xb5\x39\x72\x2d\xbd\x6b\xc0\x35\x74\xf1\
\x8d\x54\x32\x3e\xf7\x68\x01\x4d\x8d\x41\xd2\x84\x18\x04\x00\xff\
\x91\xa8\xff\xc7\xa1\xa5\x89\xdc\xdc\xe4\xa5\xe1\xc9\xce\xcc\x96\
\x4d\xe1\x44\x2c\x88\xee\xb5\x09\x7a\xae\x3d\x1e\x2a\x96\x5d\xfc\
\x3b\x5b\x82\xe3\x78\x14\x09\x1a\x38\x77\x79\xb4\xe2\x49\x75\x04\
\x00\x96\x1f\x49\x31\x4d\xe4\xe6\x4e\xdf\x3c\xb9\x2d\xf9\xa0\xb0\
\xf0\x49\xff\xf7\x37\xe6\x8f\x0f\xdc\x2c\x0d\xdf\xce\x23\x3f\x5b\
\x46\xd5\x95\x10\x44\x28\xcc\x97\xe4\xb9\xcb\xa3\x8b\x52\xa9\x8c\
\x6d\x99\x63\x00\xe0\xb7\xf0\x13\x80\xed\xb6\x65\x72\x2d\x30\x03\
\xc0\x7b\x0d\x86\xb6\x57\x31\x6f\x90\x52\xc5\x0d\x5d\x9b\x21\xc2\
\x50\xd5\x91\x87\x6c\xcb\xfc\xdb\x0f\xf7\x7f\x2e\x55\x76\x6a\x13\
\x65\xcc\xc6\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x3a\xb2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\xae\x00\x00\x00\x48\x08\x06\x00\x00\x00\xae\x9f\x11\xb1\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\x00\x00\x00\x00\x00\xf9\x43\xbb\x7f\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd8\x06\x02\
\x14\x02\x32\xf6\x3e\x31\x67\x00\x00\x20\x00\x49\x44\x41\x54\x78\
\xda\xed\xbd\x77\x98\x1c\xe7\x75\xe6\xfb\xab\xd0\xd5\x39\xcf\xf4\
\xe4\x19\x00\x33\x48\x83\x0c\x02\x14\x03\x48\x49\x24\x45\x2a\x5b\
\x96\xb5\xab\xb0\xab\xeb\xb5\xbd\xd7\xe6\x63\xcb\x26\x1d\xa4\xf5\
\x3e\xba\xeb\xbd\xb2\xa5\xf5\x1f\x57\xbe\xfb\x58\xa6\x6c\xc9\x49\
\xd6\xb5\x56\xb2\xae\xef\x5a\xb6\x56\x96\x44\x8a\x0a\x24\x48\x22\
\x11\x44\x20\x01\x10\x19\x33\x83\x30\x98\x9e\xd0\xd3\xb9\xf2\xfd\
\xa3\xba\x6a\xba\x7b\x66\x40\x88\x24\x6c\x82\xa8\xf7\x79\xfa\x69\
\x0c\xba\xab\xea\xeb\x0a\xe7\xfd\xce\xf9\xce\x79\x8f\x60\xdb\xb6\
\x8d\x0f\x1f\x3e\x7c\xf8\xf0\x71\x93\x40\xf4\x4f\x81\x0f\x1f\x3e\
\x7c\xf8\xf0\x89\xcb\x87\x0f\x1f\x3e\x7c\xf8\xf0\x89\xcb\x87\x0f\
\x1f\x3e\x7c\xf8\xf0\x89\xcb\x87\x0f\x1f\x3e\x7c\xf8\xc4\xe5\xc3\
\x87\x0f\x1f\x3e\x7c\xf8\xc4\xe5\xc3\x87\x0f\x1f\x3e\x7c\xf8\xc4\
\xe5\xc3\x87\x0f\x1f\x3e\x7c\xe2\xf2\xe1\xc3\x87\x0f\x1f\x3e\x7c\
\xe2\xf2\xe1\xc3\x87\x0f\x1f\x3e\x5c\xc8\x37\xd5\x68\x5d\x8d\x0f\
\x61\xf9\xaf\x58\x96\x85\x65\x59\x18\x86\x81\x69\x9a\xb8\xc2\x20\
\xa2\x28\x22\xcb\x32\x92\x24\x21\x8a\x22\x82\xe0\xec\xc4\xfd\xdc\
\xfd\xfb\xfa\x60\xb5\xf0\x7e\xf3\xb0\x6c\x1b\x6c\xdb\x42\x10\x04\
\x9a\x45\x49\x6c\xdb\x46\x44\x42\x10\xc0\x16\x6c\xe7\x73\x77\xbb\
\xeb\xf8\x5d\x3e\x7c\xf8\xf0\xe1\xe3\x66\x24\xae\xe5\xf8\xcc\x76\
\x88\xc0\xb2\x2c\x34\x4d\xa3\x5c\x2e\x53\x28\x14\x28\x96\x4b\xe8\
\xba\x8e\x28\x8a\x44\x42\x61\x12\x89\x04\xc9\x64\x92\x50\x28\x84\
\x2c\xcb\x2d\x04\xf6\xd3\x3b\xaa\xd6\x12\xe3\x00\xcb\xb2\x31\xcd\
\x05\xd2\x6c\x26\x46\x11\xc9\x21\x4e\x59\x40\x14\xc5\x05\xc2\x14\
\x9a\xd8\xcf\x87\x0f\x1f\x3e\x7c\xbc\x89\x88\xeb\x15\x3c\xad\x7a\
\xbd\xce\xcc\xcc\x0c\xe7\xce\x8f\x71\xec\xd8\x31\x94\x70\x08\x51\
\x14\xd1\x34\x8d\x5a\xb9\xc2\xd0\x8a\x01\x56\xae\x5c\x49\x5f\x4f\
\x2f\xc9\x64\x92\x40\x20\xe0\x11\xc8\xab\x23\xaf\x26\xc6\x02\x0c\
\xc3\xa0\x5e\xaf\x53\xae\x56\xa8\xd7\xeb\x68\x75\xb5\xc5\xe3\x0b\
\x04\x82\x44\x22\x11\x62\x91\x30\x4a\x28\x88\x24\x49\x1e\x79\xd9\
\xd8\x08\xbe\xcb\xe5\xc3\x87\x0f\x1f\xaf\x4c\x05\x6f\x06\x91\x5d\
\xdb\xb6\x51\x55\x95\xe9\xe9\x69\x0e\x1c\x7c\x9e\x5a\x55\x25\x12\
\x8f\x71\x35\x3f\x45\xbd\x5e\xc7\x36\x2d\xa2\xd1\x28\x81\x40\x80\
\xd2\xfc\x1c\xb7\xdf\x7e\x3b\xc3\x2b\x57\x91\x48\x24\x08\x04\x02\
\xaf\xd2\xeb\x6a\x3d\xbe\x65\x59\x54\xab\x55\xa6\xa6\xa6\x38\x7c\
\xf4\x08\xb3\x85\x02\x7a\x5d\x07\xc1\x02\x5b\x44\x14\x45\x14\x45\
\x61\xdd\xda\xb5\xf4\xf6\x76\xd3\x91\xc9\x12\x0c\x3b\x9e\xdf\x6b\
\x3d\xbe\x0f\x1f\x3e\x7c\xf8\x1e\xd7\x4d\x46\x5a\xa6\x69\x52\x2a\
\x95\x38\x77\x7e\x0c\x5d\x33\x49\x65\x3b\x98\x2f\x16\xb1\x4c\x10\
\x05\x99\x40\x38\x80\x61\x99\xd4\x2b\x2a\xa9\x4c\x07\x47\x8f\x1e\
\x25\x1c\x0e\xa3\x28\x0a\x92\x24\x79\x9e\xcf\x6b\x81\x61\x18\x14\
\x8b\x45\x0e\x1f\x3d\x82\x66\x58\xcc\xcd\x17\x11\x90\x3c\xe2\x42\
\xb0\xc8\x84\x82\x1c\x3f\x71\x02\xb0\x50\x14\x85\x74\xc0\x09\x57\
\xba\x21\x4b\x7f\xa9\xcb\x87\x0f\x1f\x3e\x6e\x01\xe2\x72\x49\xa3\
\x50\x28\x70\xfc\xf8\x71\x6e\xbb\x7d\x27\xc5\x62\xb9\xb1\x86\x25\
\x91\xc9\x24\x48\xa5\x52\xe8\xba\xce\x7c\x71\x0e\x44\x81\x42\xa1\
\xc0\xc5\x8b\x17\xe9\xcc\x76\x10\x0c\x06\x5f\xc3\x5a\xd7\x02\x4c\
\xd3\xa4\x58\x2c\x52\xab\xd5\x38\x7d\x6e\x1c\xdd\x32\x17\x7d\x67\
\x7e\x7e\x9e\x0d\xeb\x47\x99\x9a\xce\xd3\xd9\xd9\x49\x2c\x16\x23\
\x10\x08\xf8\x77\xa1\x0f\x1f\xb7\x28\x0c\xc3\xc0\xb6\x6d\x2f\x99\
\xac\x5e\xaf\x33\x3f\x3f\x4f\xb1\x58\xc4\x34\xcd\x57\xbd\x5f\x49\
\x92\x58\xbf\x7e\x3d\xa1\x50\xc8\x27\xae\x37\xa6\xc7\x25\x60\x19\
\x26\xe5\x72\x99\x4c\x26\x83\xa2\x28\x84\xc3\x41\xc4\x82\x4d\x28\
\x14\xa4\xa3\xa3\x83\xee\xae\x2e\x6a\xb5\x0a\x08\x4e\x38\x2f\xd7\
\xd9\xcd\xe4\x95\x29\xca\xc3\x65\x92\xc9\xe4\xeb\xe2\xf5\x59\x96\
\x85\x56\x57\x31\x75\x03\x59\x96\xc1\x16\x9d\x4c\x42\x51\x5c\x58\
\xe7\xc2\xc2\xb6\x6d\x2a\x95\x0a\xb5\x5a\xad\x25\xeb\xd1\xf7\xb4\
\x7c\xf8\x78\xf3\xc3\xb6\x6d\xa6\xa6\xa6\xf8\xf1\x8f\x7f\xcc\xbe\
\x7d\xfb\x98\x9f\x9f\x47\xd3\x34\x34\x4d\xc3\x34\xcd\x45\x64\xf5\
\x5a\x26\xd4\x81\x40\x80\xb7\xbc\xe5\x2d\xfc\xd6\x6f\xfd\xd6\x9b\
\x6e\x39\xe2\x4d\xe1\x71\x59\x96\x85\xae\xeb\x24\x12\x09\xe2\xf1\
\x38\x92\x24\xa1\xaa\x2a\x96\x35\xdb\xc8\x36\x34\x10\x04\x01\x45\
\x51\x10\x6c\x30\xa5\x00\x13\x13\x13\x1e\x79\x58\x96\xf5\x9a\xbc\
\x2e\x97\xb8\x2c\xcb\xc9\x34\x94\x65\x19\x01\x9b\xf6\xe5\x43\x11\
\x27\x4d\x5e\xd5\x34\x6f\x86\xe5\xc3\x87\x8f\x5b\x83\xb0\x8e\x1f\
\x3f\xce\x17\xbe\xf0\x05\xae\x5e\xbd\xea\x25\x72\x99\xa6\xe9\x64\
\x1c\x37\xca\x75\xdc\xa5\x03\x6f\xf9\xa0\x91\x31\xdd\xbc\x1f\xd7\
\xae\xb8\xef\xae\xdd\x71\xed\x90\x37\x91\xd6\x34\xbe\xfd\xed\x6f\
\xf3\xfe\xf7\xbf\x9f\xd5\xab\x57\xfb\xc4\x75\x3d\x17\xe9\xb5\x30\
\xfc\x4f\xb7\xbd\x45\x7e\x66\x9a\xa9\xa9\x29\x0c\xcb\x24\x1e\x0d\
\xa3\xc8\x4e\xba\xba\x7b\xf1\xce\x9f\x3f\x4f\x36\x9b\x25\x1c\x0c\
\xa1\xc8\x01\x66\xab\xd3\xde\x0d\xe0\x5e\xf4\xd7\x63\x9c\xb6\xe0\
\xbc\x1c\x8f\x6b\x61\xbf\xa2\xed\x7a\x54\x96\x77\xa3\x35\xdf\x60\
\x37\xe2\x7c\xba\xdb\x34\x6f\xfb\x5a\xaf\xcb\xbf\xc4\xb5\xf7\xe1\
\xe3\xcd\x86\x7a\xbd\xce\x1f\xfc\xc1\x1f\x70\xfc\xf8\x71\x4a\xa5\
\x12\x82\x20\xd0\xdd\xdd\xcd\x1d\x77\xdc\x41\x4f\x4f\x8f\xb7\xde\
\x1e\x08\x04\x08\x04\x02\x5e\xad\xa9\xeb\x71\xb9\x89\x5d\x00\xba\
\xae\x93\xcf\xe7\xa9\xd5\x6a\xaf\xf8\x1c\xfe\xe9\x9f\xfe\x29\xa5\
\x52\x89\x27\x9e\x78\x82\x95\x2b\x57\x3a\x76\xc9\x27\xae\xe5\xf1\
\x5a\x0d\xd7\x2b\x6d\x3f\x39\x39\x89\xa6\x69\x9e\xf1\x4f\x24\x12\
\xac\x58\xb1\x82\xa7\x76\x3f\x4d\x24\x12\x21\x18\x0c\xa2\x28\x0a\
\x91\x60\x84\xfc\xec\x0c\x87\x0e\x1d\x22\x93\xc9\x90\x48\x24\x10\
\x6d\x38\x7a\xe8\x30\xd1\x68\xd4\x4b\xce\x78\xb5\x63\x6e\x9f\x09\
\xb9\xef\xaf\x44\x5c\x6e\xc1\xf1\xf5\x16\x3f\xbf\x5a\xd2\x6a\xdf\
\xf6\x46\x11\xca\x52\x24\xe9\xc3\x87\x0f\x87\xb4\x1e\x7d\xf4\x51\
\x26\x26\x26\x50\x55\x95\x7b\xee\xb9\x87\x7b\xef\xbd\x97\x48\x24\
\xd2\xf2\x5c\x36\x3f\xaf\xcd\x2f\xf7\xf9\x72\xa3\x43\xb3\xb3\xb3\
\xe8\xba\xfe\x8a\xc7\x75\xc9\xcf\xb6\x6d\x66\x66\x66\xd0\x75\xdd\
\x27\xae\x7f\x49\xf7\x5a\x10\x04\xca\xe5\x32\x73\x73\x73\xb8\xb5\
\x53\xb9\x5c\x8e\x60\x30\xd0\xf8\x0e\xe8\xba\x46\xa5\x52\xa1\xab\
\xab\x8b\x83\x07\x0f\x72\xef\xae\xbb\x51\x14\x99\x50\x28\x44\x22\
\x19\x63\xa0\xaf\x87\x0b\x17\x2e\x90\x8c\xc5\xb9\x74\xf1\x22\x97\
\x2f\x4d\x70\xf7\xdd\x77\x13\x8b\xc5\x5a\x66\x37\xd7\x3d\x2e\xf7\
\x86\x5b\x82\x5c\x04\x41\x40\x96\x24\x44\x5b\xc4\xc4\x19\x7f\x33\
\x71\x2d\x65\xd8\x5f\x8d\x57\xb4\x54\x08\x61\x29\xb2\xfa\x97\x80\
\x4f\x56\x3e\x7c\x2c\x86\x69\x9a\x7c\xfa\xd3\x9f\x66\x7c\x7c\x1c\
\x80\x47\x1e\x79\x84\x74\x3a\x8d\xae\xeb\x4c\x4d\x4d\x61\x9a\x26\
\x92\x24\x21\xcb\x32\xb1\x58\x8c\x60\x30\xd8\x42\x5a\xcd\xcb\x0f\
\xaa\xaa\x92\xcf\xe7\xbd\x6d\xae\x27\x13\xda\xdd\x4f\xfb\x5a\xba\
\x4f\x5c\xaf\x03\x31\x2d\x75\xb2\x5d\x23\x3c\x3d\x3d\x4d\xb9\x5c\
\x26\x16\x8b\xd1\xdf\xdf\xdf\x42\x15\xcd\x9b\x4a\x92\x44\x32\x99\
\x64\xd5\xaa\x55\x3c\xfd\xf4\xd3\x28\xb2\xc4\x8e\x1d\x3b\x50\x62\
\x21\x0c\xc3\x40\xd3\x34\x62\xb1\x18\x33\xd3\xd3\x7c\xeb\x5b\xdf\
\x62\xed\xda\xd5\x0c\x0c\x0c\x10\x8d\x46\xaf\x39\x0b\x69\xf6\x24\
\x5a\xc6\xe7\x52\x57\x93\xe7\xd4\x1c\x7b\x0e\x04\x02\x58\xb6\x8d\
\x24\x2c\xf6\xb8\x5e\x2f\xef\xaa\xfd\x7b\xd7\x4b\x76\xaf\x95\x8c\
\xda\xcf\x89\xfb\x77\xbb\x84\xd6\x52\x9f\xf9\xf0\x71\xab\xc0\xb6\
\x6d\x9e\x7b\xee\x39\x0e\x1f\x3e\x8c\x24\x49\x7c\xe2\x13\x9f\x20\
\x12\x89\x30\x3e\x3e\xce\xdc\xdc\x1c\xf3\xc5\x79\x2c\xcb\x44\x14\
\x04\x24\x39\x40\x3a\x95\xa6\xab\xab\xcb\x59\xd2\x08\x87\x3d\xb2\
\x71\x27\xee\xb3\xb3\xb3\xde\xb3\xe5\x7e\xf6\x4a\x1e\xd4\xab\x17\
\x57\xb8\xc5\x89\x6b\x29\xef\xa1\xd9\xe0\x2d\xf5\x6f\xcb\xb2\xb8\
\x78\xf1\x22\x96\x65\xd1\xd9\xd9\x49\x5f\x5f\x1f\xc0\x92\xee\xb1\
\x48\xc3\x28\x8a\x02\xa1\x50\x88\xde\xee\x1e\x6e\xdf\xb1\x93\x43\
\x87\x8e\x70\xe0\xc0\x41\xd6\x8d\xae\x27\x93\xc9\x70\x69\xe2\x22\
\x67\xce\x9c\x61\x62\x62\x82\xe1\xe1\x55\x8c\x8e\x8e\xd2\xdd\xdd\
\x4d\x38\x1c\x46\x92\xa4\x65\xc7\xe8\xde\x24\xed\x63\x5c\xea\x3b\
\xba\xae\xb7\xdc\x50\xd6\x52\xa4\xdc\x24\x13\xe5\xee\xcf\x4d\x0e\
\x69\x27\xee\x16\x49\xa8\xeb\x24\xa4\x76\x89\xa9\xf6\xed\x9b\xd7\
\xd5\xda\x49\xac\xf9\x98\xee\x39\x59\x8e\x74\x9a\xd7\xe8\xda\xcf\
\x4b\xf3\x43\xe3\xc6\xe7\x7d\xaf\xcc\xc7\xad\x06\x55\x55\xf9\xf2\
\x97\xbf\x8c\x2c\xcb\xdc\x73\xcf\x3d\x28\x8a\xc2\x85\x0b\x17\xb8\
\x78\xe9\x22\xd9\xce\x1c\x23\x6b\x36\xa2\x28\x61\x87\xb8\x44\x89\
\x4a\x6d\x8e\xb1\xb1\xf3\xcc\xcc\xcc\xd0\xd5\xd5\xe5\x79\x60\xae\
\x7c\xdd\x52\xcf\xa0\x61\x18\xd7\x24\xaf\x66\xe2\xf2\x3d\xae\x57\
\x11\x42\x6a\x26\x25\xf7\x84\x1b\x86\xd1\x62\x00\xab\xd5\x2a\x93\
\x93\x93\x08\x82\x40\x6f\x6f\x2f\x82\x20\x50\xaf\xd7\xa9\xd7\xeb\
\xd7\xbc\x30\x81\x40\x00\x45\x51\x3c\x77\x7b\x70\x70\x10\xdb\xb6\
\xb9\x70\xe1\x02\x47\x8e\x1c\xa1\x5c\x2e\x53\xaf\xd6\x48\xa7\xd3\
\x6c\xda\xb4\x89\xa1\xa1\x21\xfa\xfa\xfa\x88\xc7\xe3\x8b\x54\x2b\
\x9a\xc7\xe9\x8e\xd1\x4d\x51\x75\x49\xa9\x39\x83\xa7\xf9\x26\xaa\
\x56\xab\x94\x4a\x25\x4c\xd3\x44\x56\x82\x58\x4b\x79\x6b\x58\x2d\
\x19\x88\x9a\xa6\x51\xaf\xd7\xd1\x75\xbd\x65\x1c\xa2\x28\x7a\x21\
\x84\x66\x12\x59\x8a\xb0\xdc\x8c\x4a\xf7\x7c\xb6\x9f\x9f\xe6\x50\
\xa8\xae\xeb\x68\x9a\x86\xae\xeb\x4b\x92\x97\x9b\xd9\x14\x08\x04\
\x08\x06\x83\xde\x18\x9a\xbd\x2c\xd7\x83\xd5\x1a\x99\x91\xde\x35\
\x14\x40\xb0\x9d\xa4\x18\xf7\xb8\x8a\xa2\xbc\x0e\xba\x90\x3e\x7c\
\xdc\x7c\x38\x7e\xfc\x38\xe7\xce\x9d\x23\x99\x4c\xb2\x79\xf3\x66\
\xc6\xc6\xc6\xb8\x74\xf9\x22\x6b\xd6\x6d\x46\x14\x02\xd4\x6b\x3a\
\x96\x01\x8a\xa2\x20\x2a\x02\xa9\x44\x07\x5d\x9d\xfd\x8c\x5f\x3e\
\xcb\xde\xa3\xfb\xe8\x4e\x76\x11\x8b\xc6\xb0\x2c\x47\xf5\x67\x29\
\xef\xc9\x7d\xf6\x97\xaa\x05\x7d\xbd\xea\x53\x6f\xd9\x50\xa1\x6b\
\x5c\x0d\xc3\xa0\xae\xa9\x9e\x81\xaf\x54\x2a\x68\x9a\xe6\x15\xdb\
\x49\x92\x44\x2c\x16\xc3\x34\x4d\xf2\x33\xd3\x8b\x42\x51\x2d\x61\
\x44\x9c\xf8\x6e\x40\x91\x48\x27\x53\x64\xb3\x59\x52\x89\x24\x82\
\xe4\x2c\x46\xda\x88\x04\x94\x10\xf1\x58\x12\xb5\xae\x93\xe8\x4a\
\x39\x59\x3b\x41\x65\xe1\x3b\xcb\xa4\x95\x1a\x86\x41\xad\x56\xa3\
\x5c\x2e\x33\x5f\x2a\x52\xad\x56\x17\x1b\x69\xab\x71\x33\x08\x8e\
\xf1\x37\x0d\x47\x72\x6a\x2a\x3f\x89\x69\x9a\x84\x22\xb2\x13\x45\
\x14\x1a\x44\x62\x8b\x1e\x71\x89\xa2\x88\x69\x9a\x54\x2a\x15\xae\
\x5e\xbd\x8a\x28\x8a\x0b\x64\x28\x38\xe4\xa1\xc8\x01\x12\x89\x04\
\x89\x44\x82\x48\x24\xe2\xdd\x98\xed\x37\xa1\x69\x9a\x54\xab\x55\
\x0a\x85\x02\xf3\xf3\xf3\x2d\x84\xe4\x12\x47\x34\x1a\x25\x1a\x8d\
\x62\x59\x16\x33\xb3\x05\x26\x27\x27\xa9\x56\xab\x4b\x12\x97\x20\
\x08\xa4\x52\x29\xb2\x99\x0c\x99\x4c\x86\x64\x32\x89\x20\x48\x48\
\x92\x88\x6d\x83\xa6\xa9\x14\x8b\x45\x66\x66\x66\xb8\x7a\xf5\x2a\
\xe5\x4a\xcd\x3b\x27\x96\x00\x62\x83\xb8\x24\x49\x22\x1a\x8d\x32\
\xd0\xdf\x4b\x36\x9b\xf5\x42\xb2\xaf\x87\x42\x89\x0f\x1f\x6f\x74\
\x18\x86\xc1\xde\xbd\x7b\x91\x65\x99\xce\xce\x4e\xf6\xed\xdb\x87\
\x24\x4b\xac\x5d\xb7\x05\x41\x54\x38\x7b\xfa\x38\xf1\x78\x92\xbe\
\xbe\x01\x42\xa1\x60\x23\xa3\x50\x46\x96\x25\xd6\x0c\xaf\x27\x9b\
\x4e\xf1\x83\xe2\xff\xe4\xc4\x9e\x4b\x6c\x0a\x6d\xa4\xbf\xab\x9f\
\x4c\x26\x43\x28\x14\x5a\x32\x3a\xe4\x7a\x5e\xa1\x50\x88\x54\x2a\
\x45\x28\x14\xf2\x48\xcb\x27\xae\x57\x49\x5a\xee\x0c\x7d\xb6\x30\
\xc7\xec\xec\x2c\x57\xae\x5e\xe5\xd2\xc4\x04\xa9\x4c\x86\x90\xa2\
\xa0\x28\x0a\xf1\x78\x1c\xcb\xb2\x50\x1b\xe1\x40\xb1\x89\xa4\xda\
\x89\xc5\xf9\x7f\xab\x71\x83\xd8\x1c\x7f\xf9\x04\x43\x03\x83\xd0\
\x3f\x80\xac\x04\xc8\xe7\xf3\xcc\xcc\xcc\x10\x8b\xc5\xd0\x34\x8d\
\x8e\x8e\x0e\xc2\xe1\xb0\xf3\x63\x03\x22\x67\xcf\x9e\x45\x91\x03\
\x84\x42\x21\x2f\xab\xd0\x5d\xc0\x54\x55\x95\x42\xa1\xc0\xd4\xd4\
\x14\x67\xcf\x9f\x43\xd7\x75\xe2\xc9\x24\x21\x45\x41\x33\x0c\x4f\
\x13\xde\x4d\xbc\x10\xb1\x1a\xc1\x3f\x11\x49\x09\xd0\xd3\xd7\x87\
\x20\x48\x14\xcb\x25\x4f\xe6\xc9\x23\x2e\xc1\x6a\x78\x24\xa0\xea\
\x1a\x53\x53\x53\x5c\x18\x1f\x23\x95\x4a\x2d\x22\x10\xc3\x30\x50\
\xe4\x00\x6b\xd7\xae\xa5\xbb\xbb\xdb\xd3\x59\x6c\x26\x2f\xd3\x34\
\xd1\x34\x8d\xe9\xe9\x69\x0e\x1e\x3c\x88\x14\x90\x17\xed\xc7\x34\
\x4d\xb0\x6c\x46\x46\x46\x10\x45\x91\x63\x27\x8e\xd3\x91\xcd\x39\
\x37\xb5\x2c\x21\x58\x76\x83\x70\xf0\xde\x35\x4d\x63\xff\x81\x03\
\x6c\x18\x1d\xc5\xb6\x6d\x52\x29\x87\xf4\x2d\xcb\xa2\x54\x2a\x31\
\x3e\x3e\xce\xd9\xf3\xe7\x48\xa5\x52\xce\x83\x21\x4b\x48\x04\x68\
\xa6\x24\xd1\x76\x66\x82\x17\x2e\x5c\x40\x96\xe5\x96\xec\x4d\x1f\
\x3e\xde\xec\xd0\x75\x9d\x17\x5f\x7c\x11\x49\x92\xbc\x2c\xe7\x5c\
\x77\x0f\xb2\x1c\x22\x3f\x35\x49\x77\x57\x2f\x97\x2f\x8f\x33\x34\
\xb4\xc2\x23\x2e\x27\xba\xe1\x24\x5d\x64\x33\xbd\xdc\xb5\x7a\x0d\
\xef\x79\xe8\x0e\xbe\xff\xa3\x03\x3c\xfe\xa3\x27\xb8\xad\x70\x1b\
\x7d\x5d\xbd\xa4\xd3\xe9\x45\xcf\x92\x24\x49\x64\x32\x19\x52\xa9\
\x54\x8b\xed\x75\xbd\xb4\x37\x63\xb6\xef\x0d\x23\x2e\x77\xfd\xa6\
\x56\xab\x71\x75\x6a\x9a\xa3\x47\x8f\xa2\x1a\x3a\x99\x8e\x2c\x1d\
\xb9\x6e\x54\x5d\xa3\xa6\xea\xd4\xd4\x85\xb5\x2b\xdb\x75\x64\xec\
\x05\x63\x7a\xcd\x63\xd4\x6d\x72\x9d\xdd\x5c\xbe\x7c\x99\x68\x34\
\x4a\x28\x14\xe2\xc4\x89\x13\x64\x3a\xb2\x8c\x4d\x8c\x93\x4a\x65\
\xb0\x45\xc1\x0b\x37\xda\x9a\xc0\xca\x15\xc3\x4c\x4d\x4d\x91\xcd\
\x66\x89\xc7\xe3\x4e\x22\x85\x65\x51\xab\xd5\x98\x9a\xce\x73\xe2\
\xc4\x09\x2c\x13\x12\xa9\x0c\xe5\x72\x19\x55\x37\x50\x55\x1d\x44\
\xc1\x49\x63\x17\x05\xb0\xec\x46\x68\x6c\x61\xcc\xcd\x37\xc8\x82\
\xeb\xde\x7c\x83\x39\xff\x36\x75\x27\x3c\x38\x71\xe9\x22\x5b\xb7\
\xef\xa0\x58\x2c\x2e\x92\xcc\x90\x15\x19\x43\x37\xb8\x72\xe5\x8a\
\x97\xb6\xdf\x1c\xd6\x74\xbd\xa5\x7a\xbd\xce\xc4\xc4\x04\x99\x6c\
\x27\x85\x92\xb3\x9f\xe6\x9b\x54\x12\x65\x64\x41\xe4\xd2\xa5\x4b\
\x44\x22\x11\x27\x2c\x69\xa8\xa8\xba\xe9\x8c\xdb\xb2\xbd\xdf\xe5\
\xfe\x9e\xba\xae\x31\x30\x30\xc0\xcb\x27\x4f\x12\x08\x48\xc8\xb2\
\x48\x2c\x16\x43\xd7\x75\xae\xe6\xa7\x78\xf1\xd8\x4b\xa4\xb3\x9d\
\x94\xab\xf5\x96\x6b\xe6\x25\x62\xd8\x20\x58\x36\x8a\xa2\x30\x3b\
\x93\xa7\x5a\xad\x62\x18\xc6\x9b\xf6\x01\xf2\xe1\xa3\x1d\x9a\xa6\
\x31\x36\x36\xe6\x64\x18\xcb\x32\xe1\x48\x98\x48\x24\x4d\x7e\x6a\
\x92\x40\x40\x26\x9f\x9f\x64\xeb\xb6\xdb\x89\xc7\xe3\x28\x4a\xa0\
\xe1\x6d\xc9\x1e\x79\x29\x8a\x82\x76\x35\xc8\xc9\xce\x27\xd9\xf5\
\xe0\x28\x0f\xdd\xbb\x83\x6f\x7d\xff\x59\x4e\x1c\x78\x99\x5d\x43\
\x77\xd1\xd9\xd9\x49\x30\x18\x04\x20\x1c\x0e\x93\x4a\xa5\x90\x24\
\x09\x4d\xd3\x08\x04\x02\x9e\xed\x75\x89\xab\x5c\x2e\xfb\x1e\xd7\
\xf5\xc2\x5d\xc3\xc9\xe7\xf3\x1c\x3c\xf8\x02\xb9\x9e\x6e\x8a\x45\
\x27\xf4\xe6\x19\xf7\xa6\x10\x5a\x3b\x71\x39\xde\x05\x9e\x31\x5c\
\xea\x73\x70\xc8\xad\x54\x2a\x51\x2e\x97\x31\x0c\x83\xb9\xf9\x02\
\x16\x36\xdd\xdd\xdd\x20\x4a\x0d\x7f\xc8\x49\x49\x37\x71\x3c\x8c\
\x62\xb1\xe8\x85\x2a\x03\x81\x00\xaa\xaa\x72\xe5\xca\x15\x0e\x1d\
\x39\x4c\x7f\x7f\x3f\xf3\x25\xa7\x2d\x89\x14\x58\xe6\xf4\x48\xaf\
\xd6\x08\x5b\x58\x86\x80\xaa\xaa\xd4\x6a\x35\x0c\xc3\xf0\x8e\xd1\
\x6c\xf8\x01\x94\x60\x88\x99\x99\x19\xba\xbb\xbb\x49\x24\x12\x5e\
\xcc\xda\xf5\xba\xdc\xf3\x7b\xe5\xea\x24\x99\x74\x87\xb3\x16\xe5\
\x0e\xa7\xe9\xbc\x86\xc3\x61\xae\x4c\x8c\x37\x3c\xdb\xa8\x17\x46\
\x44\x5c\x7a\xec\x82\x0d\x55\xb5\x8e\xa6\x69\x4c\x4c\x4c\x10\x0e\
\x07\xbd\x3a\x92\x33\x67\xce\x30\xb8\x62\x05\xe5\x72\x75\xd9\x73\
\x23\xd8\x10\x0b\x47\xd8\xb3\x67\x0f\x1b\x46\xd7\xb5\xcc\xfa\x7c\
\xf8\xb8\x55\x42\x85\xae\x9d\x8b\xc5\xa3\x0c\xaf\xde\xcc\xcc\xf4\
\x34\x97\x2e\x5e\x20\x12\x89\xd0\xdd\xd3\x47\x3c\x1e\x6b\x90\x56\
\xa0\x31\x41\x94\x5b\x94\x33\x7a\x8c\xcd\x5c\x51\x7f\xc0\xf9\xc0\
\xf3\x44\x82\x09\xde\xf3\x81\xcd\x14\x6e\x17\xf9\xd2\x1f\xff\x03\
\xbd\xfb\x7b\x49\x44\xe3\x5e\x5b\xa6\xe6\xa5\x0f\xf7\x39\xab\x54\
\x2a\x00\xe4\xf3\x79\x56\xaf\x5e\xed\x7b\x5c\x3f\x8d\xb7\x35\x5f\
\x2a\x72\xe6\xdc\x59\xba\xba\xba\x28\x57\x2a\xce\x6c\x40\x68\x22\
\x1e\xb1\xb1\x66\x65\x4b\x4b\x12\xd3\x2b\xfd\x0d\xce\x9a\x90\xaa\
\xaa\x1e\x19\x94\xcb\x65\xd6\xad\x1d\x45\x33\x0d\xac\x46\xbe\xba\
\x60\x37\x0a\x83\x1b\x31\x5f\x55\x55\xbd\x24\x85\x4a\xa5\x42\x3e\
\x9f\xe7\xd0\xa1\x43\x0c\x0c\xae\xa0\x5c\xab\xbe\x62\x9a\xa9\x6d\
\xbb\xb1\x63\x6b\xd9\xdf\xdf\xac\x51\xd8\x4c\x5c\x1a\x2a\x86\x61\
\x78\x0d\x2e\x03\x92\xdc\xc6\x89\x0b\xe5\x00\xd5\x6a\xd5\xd1\x3f\
\x6c\xaa\xc3\x68\xde\xa7\x9b\x40\xe2\x25\x51\x78\xe7\xc7\x25\x0b\
\xd3\x3b\x3f\xa2\x28\x92\x4d\x77\x70\xea\xcc\x69\xd6\xae\xdf\xf0\
\x8a\x95\xf7\xab\x57\xaf\xe6\x85\x83\x07\x1a\xde\x96\x49\xa1\x50\
\x40\x53\x1d\xef\x53\x96\x15\xef\xf7\xb4\x9f\x0f\xd1\x86\xab\x53\
\x57\x48\xa7\x12\xa4\x93\x29\x22\xa1\x30\x01\x49\x46\x12\x44\xdf\
\xa2\xf9\xb8\x25\xe0\x8a\xe5\x46\xa3\x51\x36\x6c\xbc\x8d\xa9\xab\
\x57\x29\x97\xe6\xe9\xe9\xe9\xa7\x5a\x2d\xb1\x6a\xd5\x08\xe5\xab\
\x93\xfc\xe3\x5f\x7c\x8e\xb3\x95\x73\xc8\x08\x6c\xed\xbb\x9d\x5f\
\xfa\x83\x3f\x71\x6a\x40\x45\x91\x5c\xa6\x9f\xc8\xd5\x41\xca\x9d\
\x63\x94\xcd\x79\x4e\xcb\xcf\x92\xeb\x1a\xe1\xf7\x3e\xf3\x71\x1e\
\xfb\xcb\x6f\x71\xfa\x3b\xa7\x09\x19\xc1\x45\x09\x70\x2e\x79\xb9\
\xcb\x08\xd1\x68\x94\x5d\xbb\x76\xbd\xe1\x8a\x8f\x5f\x6b\xf4\xe5\
\x86\x11\x97\xae\xeb\xcc\xcd\xcd\x31\x33\x33\x43\x67\x47\x97\x27\
\x59\xe2\x12\x97\x20\x08\x2d\xc4\xf2\x6a\x89\x4b\x16\x9c\x64\x87\
\xab\x57\xaf\x92\x4e\xa7\xbd\x14\x77\x09\xdb\x73\x2a\xda\x89\xce\
\x34\x4d\x4f\x2b\xac\x54\x2a\x71\xe2\xc4\x09\x56\x0e\xaf\xa2\x5c\
\xad\x5f\xe7\x05\x16\x5b\x0c\xf7\x4f\xe3\x71\x09\x82\x80\x6d\x3a\
\xa1\xbe\x80\x28\xb5\x5c\x01\xd1\x6e\xf5\x58\xdd\x71\xb6\x27\x52\
\x34\xa7\xd1\xbb\x3a\x8b\xed\xc4\xe5\xdc\x18\x0b\x7a\x67\x81\x80\
\x93\xf0\xb1\x6a\xc5\x4a\xce\x9d\x3e\xc3\xe0\xca\x15\x4b\xd6\x5c\
\x79\xe1\x0e\xd3\x60\x64\xcd\x6a\xc6\xc6\xc6\x28\x95\x4a\x4c\x4e\
\x5d\x65\xdd\xe8\x46\x54\x55\x65\xa1\x03\xb4\xd8\x74\x3e\x2c\x44\
\x5b\x24\x18\x0c\x72\x60\xff\x5e\x46\x47\x47\xc9\x66\xb3\x0b\xc9\
\x25\xa2\x1f\x22\xf4\x71\x6b\xa0\x5c\x2e\xa3\xeb\x3a\x6b\xd6\xae\
\x26\x12\x49\x32\x3b\x33\x8b\xa1\xab\x28\x4a\x80\x2d\x5b\x77\x62\
\x9b\x06\x5f\xfd\xc2\x27\x39\xba\xf2\x1c\xa1\x4e\x10\x24\xf8\xe7\
\xb9\x27\xa8\x7e\xf2\x7f\xe3\x53\x8f\xfd\x3d\xa2\x24\x11\x8f\xc7\
\x58\x3f\xfe\x21\x0e\x44\xbe\x80\x1e\x52\xb1\x4c\x91\x49\xf9\x14\
\x65\x79\x9a\x4f\xfc\xea\x7b\xf9\xe1\xba\xa3\xfc\xf3\x17\x9f\x23\
\x56\x8c\x7a\x51\x98\xe6\xf7\x78\x3c\xce\x9d\x77\xde\xc9\x03\x0f\
\x3c\xc0\xc0\xc0\xc0\x1b\xae\x0b\xc5\x6b\x55\xda\xb9\x61\xc4\x65\
\x18\x06\x17\x2f\x5e\xa4\xa7\xa7\x07\x0b\x11\xa9\x2d\x35\xbc\x11\
\x71\x6b\xf9\x11\x4b\x11\x43\xf3\x67\xed\xdf\x13\x6d\x28\x14\x0a\
\x54\xab\x55\xd6\xae\x5d\x4d\x38\x1c\x46\x10\x04\xc7\x03\x59\xe6\
\x7c\xb8\x9e\x90\x61\x18\x94\xcb\x4e\xe3\xc7\x44\x32\x4d\x4d\x6d\
\x95\x44\x71\x8e\x25\x78\xe3\x68\x2e\xa6\x6d\xfe\xff\x9f\x6e\x86\
\xd1\x5a\x57\x21\xcb\xf2\x42\x86\x22\xad\x04\xa6\xeb\xba\x33\x7b\
\x12\xb8\xa6\x9e\xa1\x1b\xfa\x93\x85\xa5\x8f\xe5\xad\x77\x35\xb2\
\x36\x65\x59\xa6\x5c\x2e\x23\xd8\x26\xa2\x74\xed\x0e\xd0\xc9\x44\
\x9a\x17\x8f\x1e\x63\x6a\x3a\xcf\xf6\xed\xdb\xbd\xec\x25\x67\x9f\
\x52\xd3\x39\x10\x01\x09\x59\x10\x79\x7a\xf7\x4f\xe8\xed\xed\xa5\
\xbb\xbb\x9b\x54\x26\x4d\x38\x1a\x41\x0a\xc8\x7e\x21\xb2\x8f\x5b\
\x06\xee\x44\x73\xe3\xe6\xed\xcc\xcc\x4c\x73\xe5\xca\x04\xa1\x60\
\x88\xf5\xa3\x9b\x08\x85\x42\xbc\xf0\x0f\xff\x83\xa3\x9d\xe3\xc4\
\x57\x42\xb4\xcf\x21\xae\xfa\xb4\xcd\xa1\xab\x87\x98\xbb\x74\x91\
\xae\x95\xab\x10\x04\x81\xe1\xc1\xf5\x30\xf9\xeb\xec\xcf\x3e\x86\
\x1e\xac\x61\x99\x36\x45\x79\x86\x13\xd2\x8f\xb8\xe3\x9e\xed\xac\
\x5e\xd9\xcf\xd7\xff\xea\x27\xdc\xd7\x75\x1f\xab\x87\x47\x48\x26\
\x93\xde\xf3\x2c\xcb\x32\xd9\x6c\x96\xc1\xc1\x41\x12\x89\xc4\x9b\
\xee\x1c\xbf\xee\xc4\xe5\xba\xaa\x17\x2e\x5c\xc0\x30\x8c\xc6\xda\
\x8c\xfc\x8a\xdb\xb4\x1a\xb5\xf6\x19\xfd\x62\x54\xab\x55\x2a\xc5\
\x12\x99\x6c\x8a\x74\x3a\xe9\x89\x53\xba\xe4\x24\x8b\xc2\x22\x76\
\x77\x8f\xe5\xae\x0f\xcd\xce\xce\x32\x3e\x31\xc1\xc8\xda\x35\x4b\
\xd6\x73\x09\x82\xb4\x0c\x41\xb9\xe3\x93\x96\x21\xde\xe5\x66\x17\
\xad\xbf\x53\x92\x24\x90\xc4\x6b\x13\xd7\x75\xcc\x4a\x9c\x90\xa3\
\xb8\x6c\xd8\xc2\xfd\x4e\x30\x18\x24\x1c\x0e\x33\x30\x30\xc0\xc1\
\x83\x07\xd9\xb2\x75\x3b\x86\xbd\x3c\x29\x8a\x36\xdc\x7f\xff\xfd\
\x5c\x18\x3b\xe7\x64\x66\x8a\x72\x9b\x8a\x88\x8d\x6d\x8b\x80\x93\
\xd2\x3f\x37\x37\x43\x38\x1c\xa6\xb7\xb7\xd7\xeb\x37\xa6\x28\x4e\
\x09\x82\xdf\xb4\xc5\xc7\xad\x02\xf7\x59\x0b\x06\x15\x82\x4a\x98\
\xb5\x6b\x37\xa2\xd6\x6b\x8d\xec\x60\x99\x5a\x71\x16\x21\x69\x10\
\xe9\x81\xe4\x1a\x10\x64\x90\xc2\x50\x1e\x80\x99\xf1\x71\xba\x57\
\x0d\x7b\xdd\x2c\xd6\xf4\x6d\x22\x74\xf5\x53\x3c\x1b\xfd\x63\xea\
\xa1\x59\x2c\x53\xc4\x92\xab\x9c\x95\xf7\x90\xeb\x1b\xe1\xd1\x4f\
\x7d\x80\x2f\x7d\xe5\x7f\xf1\x83\xbf\x79\x92\x9e\x64\x17\x0f\x3e\
\xf8\x20\xef\x7c\xe7\x3b\x3d\xc1\xde\x70\x38\xfc\x86\x55\xd0\x78\
\xc3\x85\x0a\x2f\x5c\xb8\xe0\xd5\x64\xc9\xb2\x8c\x20\x05\x96\x35\
\x8c\x6e\x8a\x3b\x80\x8d\xd9\x14\x13\x6c\x90\x86\xdb\x41\xb8\xf1\
\x6f\xc3\x30\x98\xce\xcf\x12\x8d\x46\xe9\xeb\x77\x34\x08\x13\x89\
\x44\xa3\x76\x41\x5e\x68\x0f\xb0\xcc\x39\x71\x43\x6f\xaa\xaa\x52\
\xae\x56\x58\xb1\x72\xd0\x0b\x65\x09\xb6\x1b\x92\x13\xdb\xc6\xe9\
\x18\x66\x41\x6c\x14\xe2\x5a\x42\xc3\x60\xbb\xe3\x13\x1a\x99\x24\
\x0d\x0f\x51\x90\x96\x21\xe6\x56\x92\x70\x52\xf1\x97\xd0\x19\x34\
\xad\x6b\xaa\x79\xb4\xc3\x29\x32\x5e\x4a\xeb\xd0\xc2\x34\x75\x6f\
\xbf\x6e\xad\x47\x2e\x97\x63\x64\x64\x84\xd3\x67\x4e\xb2\x7a\xf5\
\x6a\x6c\x41\x5a\xf6\x26\xb2\x80\xfe\xc1\x15\x2d\x37\xff\x52\x9e\
\xaf\x20\x0b\xfc\xe8\xc0\xf3\x8c\x8e\x8e\xd2\xd5\xd5\x45\x32\x99\
\x5c\xa8\x27\xb1\x9d\xd3\xe3\xc3\xc7\xad\x42\x5c\xa1\x50\x08\x51\
\x90\x01\x81\x64\x32\x4d\xb8\xbb\xa7\x91\x39\x28\xd2\x35\xb4\x86\
\xd4\x85\x38\x72\xa4\x44\x6c\x10\xa4\x20\xe8\xf3\x90\xac\xc5\xe9\
\xdb\xb0\x01\x51\x5c\x50\xad\x91\xa4\x00\x2b\x7b\x57\x13\xba\xfa\
\xbb\xec\xd3\xbf\xc2\x54\xe4\x38\x96\x69\x63\x99\x36\x93\xf2\x29\
\x2a\xca\x2c\x9f\x78\xf8\xbd\x3c\xbe\xe6\x10\x3f\xf9\xeb\x23\xcc\
\xfe\xdd\x2c\x8f\x3f\xfe\x38\x8f\x3e\xfa\x28\x9b\x36\x6d\xc2\x30\
\x8c\xb6\x4c\x67\xdf\xe3\x5a\x12\x63\x63\x63\x64\x32\x19\x4a\xa5\
\x12\xf9\x7c\x9e\xae\xae\x2e\x10\xe5\x25\xbd\x12\xc1\xb2\xf9\xc1\
\x0f\x7e\xe0\xd5\x5d\x05\x94\x86\xca\x43\x33\x29\x00\xd8\x22\xa6\
\xa5\x7b\x0a\x13\xe1\x50\x14\x51\x72\xaa\xce\x73\xb9\x1c\x03\x7d\
\xbd\x24\x12\x09\x34\xcd\xf0\x64\x86\xc4\x65\x3c\x10\x77\xed\xa8\
\x52\xa9\x90\x9f\x99\xa6\x7f\x70\x10\xa1\x91\x20\x21\xd8\x82\xe7\
\x69\x79\x1e\x9a\x61\x52\xad\x55\x79\xe1\x85\x17\x98\x2b\x38\x2a\
\xcb\xa1\x60\xc4\x59\xb3\x13\x9c\xf5\x25\xcb\x6c\x14\x03\xd7\xca\
\x0c\x0d\x0d\xb1\xe3\xb6\xdb\xb1\x97\x64\x4e\xab\x85\x12\x9b\x09\
\xa7\xf5\xc6\x97\xae\x5b\x8b\x50\xc4\x21\xa4\xa5\x8e\x27\x08\x36\
\xa6\xbe\x40\x5c\x82\x20\x78\x42\x9e\xbd\xbd\xbd\xcc\xcc\xcd\x62\
\x59\x16\x41\x25\xb8\xcc\x78\xaf\x13\xa6\xc5\xee\xdd\xbb\xe9\xeb\
\xeb\xa3\xbb\xbb\x9b\x4c\x26\xd3\x52\x74\xec\x87\x08\x7d\xdc\x4a\
\x88\x44\x22\xc4\x62\x31\xce\x9d\x3d\xc9\xb6\xed\x77\x7b\xa2\xb8\
\xce\xf3\x2e\xb1\xf5\x7d\x3f\xcb\xfd\x7f\xb4\x9b\xdd\x33\x4f\xa1\
\xce\x1a\xc8\x31\x08\x4e\xc4\x78\x7b\xee\x67\x48\xe7\xba\x1a\xcf\
\xaa\x2b\x99\xe6\xd8\xb3\xfe\xde\x21\xe2\xc5\xdf\x62\xbc\x74\x88\
\x83\xa1\x6f\x50\x57\x1c\xef\x6b\x5e\x9e\xe2\x65\xf9\x47\xec\xba\
\xf7\x36\x46\x56\xf4\xf3\xb7\x7f\xf6\x38\xea\x05\x95\xcf\x7d\xee\
\x73\xfc\xe2\x2f\xfe\x22\x0f\x3e\xf8\xa0\x67\x37\x7d\x75\xf8\x65\
\xe0\x2a\x60\x84\x42\x21\x66\x67\x1d\xa3\x28\x49\x92\x47\x0c\xed\
\x18\xbf\x70\x8e\x62\xa9\xc0\x40\x7f\x2f\x99\x4c\x86\x48\x24\xe2\
\xa5\x83\xba\xa8\x54\x2a\xe8\xba\x4e\x30\x18\x24\x96\x88\x23\x49\
\x02\xb2\x18\xf0\x94\x21\x62\xb1\x08\xc9\x64\x9a\x50\x28\xc4\xfc\
\xfc\xfc\xc2\x05\x5a\x86\xb8\x5c\x79\xa4\x52\xa9\xe4\xc9\x3e\x89\
\x72\xa0\x91\xdd\xd8\xe6\x25\xd9\x26\x27\xcf\x9f\xe1\xc4\xb1\xe3\
\xe4\x72\x39\xd6\xaf\x5d\xe7\x85\xbf\x9a\xa5\x94\xdc\x9a\xaa\x7c\
\x3e\x8f\xaa\x6b\xde\x67\xcd\xcb\x57\x8e\xe7\xd1\xda\x38\xd2\x1d\
\xa7\x9b\x94\x61\x35\x89\xf2\x5e\x2f\x71\xb9\x19\x8c\xb2\x2c\x63\
\xb9\x44\xdf\xa0\x47\x11\x0b\x75\x09\x29\xa9\x70\x38\x4c\x26\x93\
\x61\xc5\xe0\x10\x07\x0e\x3e\xcf\xdd\x77\xdd\xe3\x85\x59\xaf\x6f\
\xc1\x74\x21\x6b\x11\x60\xb6\x90\xc7\x32\x75\x7a\xbb\x57\x91\xeb\
\xe8\x24\x91\x48\xb4\x14\x77\xfb\xf0\x71\x2b\x21\x1c\x0e\x13\x8f\
\xc7\x79\xe9\xa5\x17\xd9\x79\xfb\xbd\x48\x4d\xd2\x6b\x4e\x18\x31\
\xc0\x87\x7e\xfb\x8f\xe8\xfd\xe6\x9f\x93\xff\x1f\x2f\x21\xca\x22\
\x6b\xfa\x1e\xe0\xed\xbf\xf5\x2b\x88\xa2\xc0\xd9\xb3\x67\x79\xe6\
\x99\xa7\xb9\xf3\xce\xbb\x18\x1d\xdd\xe0\x3d\xbb\xe9\x54\x8a\x68\
\x64\x17\xbd\xa5\x0d\xbc\x54\xfb\x2e\x27\x94\xef\x61\x99\x26\x96\
\x59\xe3\xac\xfc\x1c\xb9\x81\x35\x7c\xf2\xbf\x7c\x84\xbf\xfc\xea\
\x77\x99\x7b\xb6\xc0\x17\xbf\xf8\x45\xb2\xd9\x2c\xb7\xdd\x76\x1b\
\x9a\xa6\x2d\xd8\x1c\x9f\xb8\x5a\x91\xcf\xe7\x59\xb9\x72\x65\x23\
\xf3\x0c\xaf\x07\x4c\x33\x71\x35\x7b\x5e\xb9\x5c\xae\x45\xcc\xd5\
\x35\xc0\x00\xf3\xf3\xf3\x00\x44\xa3\x51\x52\xa9\x94\xa3\x49\x18\
\x72\x14\x2f\x42\x4a\xd8\x79\x0f\x29\x84\xc3\x61\x42\x21\xa7\xb7\
\x4d\xb5\x5a\x45\xc4\x49\xce\x10\xe4\xa5\x95\x1a\x64\x41\xc4\x30\
\x0c\x2a\xb5\x2a\xb7\x6f\xda\xd8\xc8\x76\x5c\x9a\xe4\xb4\xba\xce\
\xd1\xc3\x47\x58\x3d\x3c\xc2\xe0\xe0\x20\xd9\xce\x0e\x8f\xb8\x9a\
\xc9\xd5\x2d\x60\x0e\x06\x83\x4c\x4c\x4c\x78\xf5\x18\xb6\xd8\x16\
\x52\x6b\x23\x20\x51\x14\x11\x24\x69\x41\x3d\xfe\x1a\xc4\x75\xad\
\x50\xa1\x4b\x48\x0b\xdb\x88\x9e\x37\xb6\x14\x01\x4a\x92\x44\x3c\
\x1e\xa7\xab\xab\x8b\xd5\xc3\x23\x9c\x3d\x7b\x96\xb5\x6b\xd7\x5e\
\xb7\xba\x85\xd0\x50\x02\xb1\x11\xd1\x75\x9d\x9f\xfc\xe4\x27\xac\
\x59\xb3\x86\xee\xee\x6e\x2f\xb3\xb3\x5d\x07\xd2\x87\x8f\x5b\x05\
\x92\x24\xb1\x69\xd3\x26\xbe\xfe\xf5\xaf\xb3\x6f\xff\x8f\xb9\xf7\
\xde\x77\x21\x49\x6e\x67\x63\xc7\xce\xc5\xe2\x71\xde\xfd\xf0\x7f\
\xf2\xa2\x12\x8e\x44\x93\x33\x51\x7f\xe4\x91\x5f\xc3\xb2\x2c\xbe\
\xfd\xed\x7f\xe4\xab\x5f\xfd\x1a\xa9\x54\xda\xdb\xb7\xa2\x04\xe8\
\xcc\x76\xf2\x96\xda\x47\x58\x53\x7d\x2b\xcf\x69\x7f\xcd\x94\x75\
\x02\xc9\xb2\x99\x94\x5e\xa6\x12\x9a\xe1\x57\xff\xf7\x77\xf3\xdd\
\xe1\x83\xec\xfd\xda\x09\x3e\xff\xf9\xcf\xf3\xd8\x63\x8f\xd1\xd1\
\xd1\x81\xa6\x69\xde\xf3\xef\x13\x57\xdb\x05\x6b\xf6\x26\xdc\x66\
\x69\x41\x39\xb0\xc8\xa3\xb0\x6d\x9b\x68\x3c\xc9\x07\x3f\xf4\x6f\
\xb9\x7c\x71\x9c\x5a\xad\xc6\x7c\xa9\xe8\xd4\x37\x09\x32\xf1\x78\
\x1c\x41\xb4\x31\x6d\x8b\xf9\x52\xd1\x51\xb1\x50\x55\x52\xa9\x14\
\x3d\x5d\x8e\x81\x0c\x06\x83\xc8\xb2\xe2\x55\x8b\x2f\xc4\x85\x25\
\x02\x72\xc0\xd3\x12\x14\x90\x1a\xed\x48\x4c\x54\x09\x6f\x8d\x2b\
\xa8\x84\x09\x04\x82\x0b\x63\xc2\x69\xa3\x8d\xed\x90\xdb\x4f\x7e\
\xf4\x63\x7a\x7b\x7b\x19\x18\x18\x60\x60\x60\x80\x74\x36\xe3\x89\
\xc6\xb6\xab\xae\x57\x2a\x15\x0a\x85\x59\x8f\x7c\x25\x49\xc2\x12\
\x5a\xb3\x11\x85\xf6\xac\x4a\x49\x42\x0a\x04\x9a\x3c\x2e\x6b\x81\
\xe0\xc4\xeb\x2f\xd8\x75\x85\x71\xbd\xed\x9b\x42\xad\x4b\x11\x97\
\xdb\x51\x35\x99\x4c\xd2\xd7\xd7\xc7\xa1\x43\x87\xd0\xf4\x3a\xf1\
\x60\xd2\x93\x7e\x02\x30\x1b\xe7\xc3\xdd\x9f\xd5\xd8\x9f\x60\x09\
\x80\x73\x8e\x9e\x3f\xb8\x9f\x5c\x77\x17\xdd\xbd\x3d\x64\x3a\xb2\
\x44\x62\xd1\x96\x44\x99\x16\x0f\xd6\x27\x32\x1f\xb7\x00\x02\x81\
\x00\xef\x7d\xef\x7b\xf9\xe6\x37\xbf\xc9\x81\xbd\x07\x48\xa7\xb3\
\xec\xbc\xed\x6e\x8f\xb4\x04\x41\x40\x68\xd2\x12\x6c\x7e\x15\x0a\
\xf3\xde\x24\xb5\x56\xab\xa1\xaa\xda\x12\x51\x17\x81\x48\x38\x44\
\x28\x38\xc4\xbb\x6b\xbf\xcb\x84\xf1\x02\xcf\x6a\x5f\xa1\x2e\x15\
\x99\x97\xa7\x38\x25\x3e\xc5\xdb\xde\xbe\x95\x91\xa1\x5e\xfe\xf6\
\x4f\x7f\xc0\x97\xbf\xfc\x65\x1e\x7d\xf4\x51\xa2\xd1\x28\x9a\xa6\
\x2d\xa9\x79\x78\x4b\x13\x97\x6b\x9c\x5c\xd5\xf6\x6c\x36\xdb\xd2\
\x37\xc6\x6a\xeb\x4f\x25\x49\x12\xa5\x52\x89\x5c\x2e\xe7\xe9\xe2\
\x45\x22\x91\x45\x6b\x5c\xee\xba\x14\x38\xc9\x15\x85\xd9\x39\x0e\
\x1f\x3e\xcc\xf0\xf0\x30\x7d\x7d\x7d\x64\x32\x19\x24\x69\x61\x5d\
\xca\x2d\xc8\x6d\x17\xb9\xa5\x71\x73\xb8\xaa\xef\xae\x94\x92\x4b\
\xa4\x82\xeb\x45\xd9\x4e\xad\xd7\xf4\xf4\x34\x5b\xb6\x6c\xa1\xab\
\xab\x8b\x4c\x26\x43\x3c\x91\xf0\x48\xab\x9d\xb8\x5c\xef\x52\x14\
\x45\x67\xad\x4e\x58\x08\xdd\x79\xad\x4a\x9a\x6e\x3e\xc1\x76\x08\
\x47\x6a\x1c\xbf\x9d\xb8\xae\xc7\xd3\x6a\x26\xc0\xe6\x50\xe1\xb5\
\x88\xab\xd9\xeb\x0a\x87\xc3\x64\xb3\x59\x86\x87\x87\xd9\xb7\x67\
\x2f\xf7\xdd\xff\x0e\xaf\xde\xce\x79\x3c\x84\x45\xc4\xe5\x1e\xcf\
\xb6\x6d\x66\xe7\xa6\xb9\x72\xe5\x0a\xa3\xa3\xa3\xe4\x72\x39\x2f\
\x21\x63\xa9\x19\x9d\x4f\x5a\x3e\x6e\x25\xe2\xea\xe9\xe9\xe1\xde\
\x7b\xef\xe5\xa9\xa7\x9e\xe2\xb9\xdd\xcf\x50\x52\xe7\xb9\x73\xdb\
\x5b\x09\x45\xa2\x28\x01\x19\x45\x09\x12\x0e\x85\x08\x04\x14\x62\
\xd1\x90\xf7\xc4\x0d\x0c\xf4\xf3\x1f\xfe\xc3\x2f\xf2\x93\x9f\xfc\
\x88\xf5\xeb\x37\xf0\xd8\x63\x5f\xe0\x37\x7e\xe3\x51\x0c\xc3\xe0\
\xca\x95\xcb\x5c\xbe\x7c\x99\x2b\x57\x2e\x73\xdf\x7d\x0f\x30\x32\
\x32\x42\x34\x1a\x65\xd8\xb8\x93\x21\x63\x1b\x13\xf6\x0b\xbc\x68\
\x7c\x9f\xab\xe6\x69\xce\xc9\xfb\xe8\x5a\xb9\x9a\x4f\xfd\xd7\x0f\
\xf3\xc5\x2f\x7e\x9b\xf7\x9f\x7b\x3f\x1b\x37\x6e\xf4\x22\x61\xcd\
\xcf\xf9\x2d\x4f\x5c\xa9\x54\x8a\x33\x67\xce\xd0\xd7\xd7\x47\x20\
\x10\x20\x16\x89\x62\x68\x8e\x41\xd7\x75\x9d\xe2\xfc\x9c\xe3\x51\
\x21\x41\x83\xe0\xfa\x7b\x7a\x17\x8c\xb9\xab\xc4\xc0\xe2\xe4\x0c\
\x02\x0b\x7f\x27\xe2\x29\x3a\x3a\xbb\x38\xf0\xfc\x3e\x2a\xb5\x2a\
\x23\xab\x86\x49\x26\x93\x9e\xc2\xc4\x22\xe2\x6a\x0a\x71\x49\x82\
\x88\xa1\xe9\xe8\xa6\x41\x40\x91\x50\xe4\x25\xc2\x84\xb6\x48\xb9\
\x5c\x26\x9d\x4e\x93\x4a\x24\x89\xc6\x63\x04\xc3\xa1\x16\x85\xf3\
\xf6\xf4\x79\x27\x0c\x20\x7b\x1e\x90\x13\x4a\x6c\x13\xb9\xb4\x9b\
\x12\x4f\x00\x49\x16\x5a\x8f\x6f\x37\x85\x54\x69\x3e\x8e\xc5\xb5\
\x6a\xc6\xbc\x10\xeb\x22\xa2\x5e\xbe\x71\xa5\x3b\xb9\x88\xc7\xe3\
\xe4\x72\x39\x06\x07\x07\xb9\x70\xfe\x2c\xeb\x46\xd7\x23\x0a\x4e\
\xda\xbb\xdc\x98\x15\x36\xab\xe1\xbb\xfb\xaf\xd7\xeb\x7c\xff\xbb\
\xdf\x63\x70\x70\xd0\xf1\x80\x93\xa9\x65\xfb\x9b\xf9\xf0\x71\xab\
\x21\x1e\x8f\xf3\x91\x8f\x7c\x84\x1f\xfe\xf0\x87\xe8\xba\xce\xc5\
\xd3\x17\xf9\xd4\x91\x47\x48\x09\x16\x99\x35\x41\x42\x21\x85\x50\
\x20\xcc\x9a\xec\x0e\xee\x1a\x79\x3f\x2b\x06\x56\x11\x8d\x44\x00\
\x81\x8f\x7e\xf4\xdf\xf1\xd1\x8f\xfe\x3b\x00\x26\x26\xc6\xf9\xf4\
\xa7\x7f\x97\x5c\x2e\xc7\xbb\xdf\xfd\x1e\x36\x6e\xdc\xc8\x7d\xf7\
\xdd\xcf\x37\xbf\xf9\x0d\xbe\xff\xfd\xef\x92\x4a\xa5\xf8\xd8\xc7\
\xfe\x3d\x01\x39\xc6\xb0\x75\x37\x2b\x03\x77\xa2\x09\x25\x4e\xeb\
\x4f\x73\xc9\x3e\x46\x2d\x72\x99\x9f\xf9\xb9\x3b\xf9\xce\x73\xdf\
\x63\xed\xda\xb5\x28\x8a\xe2\x39\x01\x3e\x71\x35\x90\x4e\xa7\x49\
\xa5\x52\x9e\xda\x42\xad\x56\xe3\xc8\x91\x23\xec\xbc\xfd\x0e\x22\
\x91\x08\xd9\x6c\x96\x40\x20\xb0\x20\x49\xd4\x6e\x4c\x05\xdb\x6b\
\x4b\xb2\x88\xb8\xda\xfe\x8e\xc7\xe3\xdc\x73\xcf\x3d\x7c\xf7\xbb\
\xdf\x25\xa4\x38\xe1\x3e\x4d\xd3\x5a\x89\xab\xe1\x25\x88\x6d\x06\
\x5b\x33\x9c\x1a\xa9\x50\x30\x82\x28\x2b\x8b\x04\x7d\x45\xdb\xc9\
\x12\x0c\x87\xc3\x84\xc3\xe1\x16\x91\xdb\xeb\x31\xca\x4b\xb5\xd6\
\x6e\x21\x80\x36\x4f\xa9\x99\x30\x5f\x8d\x87\x22\x49\x92\x93\xee\
\x7a\x9d\xc4\xd5\x5c\x94\xec\x86\x0c\x73\xb9\x1c\xcf\x3d\xf7\x1c\
\xab\x57\xaf\x26\x18\x0d\x7b\xdb\xdb\xb6\x8d\x20\x49\x8b\xce\xff\
\xf8\xf8\x38\xa1\x50\x88\x6c\x36\x4b\x22\x91\xf0\xc4\x80\x7d\xe2\
\xf2\xe1\xc3\xc9\x78\x1e\x19\x19\xe1\xee\xbb\xef\xe6\xf1\xc7\x1f\
\x67\xcb\x96\x2d\xf4\x06\x07\x38\x5c\xd8\x4f\xb2\x60\x90\xdd\x20\
\x22\x07\x45\xc6\x4a\xc7\x78\xea\xd2\xdf\xf1\xce\xe1\x5f\xe4\xfe\
\x0d\x1f\xa5\x2b\xd7\x89\x28\x2e\xa4\xae\x0f\x0c\x0c\xf2\xd8\x63\
\x7f\xc6\x9f\xfc\xc9\x1f\x73\xf6\xec\x59\xee\xbc\xf3\x6e\x44\x51\
\xe4\xe3\x1f\xff\x79\xde\xf7\xbe\x77\x61\x9a\x26\xf7\xdf\xff\x0e\
\xba\xbb\xbb\x91\x44\x09\x51\x10\x11\xcc\x14\xa3\xd2\x7b\x19\x15\
\xdf\x8d\x6d\x5b\x94\x57\xce\xf2\xf9\x1f\xfe\x31\x17\x2e\x5c\x60\
\x68\x68\x88\x50\x28\x74\xd3\x87\xee\x6f\x48\x8a\xc9\xe0\xe0\x20\
\xb5\x5a\x8d\x70\x38\xec\x65\xef\x75\x75\x75\x2d\xf2\x80\x16\x19\
\xec\x6b\x10\x55\xb3\x21\x36\x1b\xda\x78\x92\x12\xe0\xce\xbb\xef\
\xe1\x85\xe7\xf7\x13\x89\x44\x9c\x70\x59\x43\x02\x49\x52\x1a\xeb\
\x6a\xa6\x81\x61\x98\x9e\x7c\x92\x69\x39\xa4\x94\xcb\x75\x53\xd7\
\x0d\x66\xe6\x8b\x88\x82\xd0\xd2\xb1\x58\x12\x9d\x0c\xc9\x40\x20\
\x80\xac\x04\x9a\x16\x4f\x6d\x96\x2a\x8c\x6e\xbf\x01\x64\x49\x59\
\x3a\x7b\x47\xb6\x5a\xd6\xae\x64\x49\x21\x20\x07\x9b\x76\xd4\xd4\
\x09\x59\x6c\x57\x12\x11\x5f\x31\x54\x78\xdd\xc4\x65\x3b\x61\x53\
\xbb\x89\xbc\x42\x21\x85\x70\x34\x82\x69\x5b\x5e\xf3\x48\xbb\x11\
\x3a\x15\x6d\xd1\x5b\x23\x74\x3d\xc8\xc2\xfc\x2c\x8a\xe2\x24\xc7\
\x44\xc3\x11\x82\x01\xc5\x6f\x16\xe9\xc3\x47\x93\x4d\x48\x24\x12\
\x3c\xfc\xf0\xc3\xe8\xba\xce\x0f\x7f\xf8\x43\xd6\xae\x5d\xcb\xba\
\xc8\x66\x8e\x97\x5e\xc0\x3a\x6a\x90\xdd\x28\x21\x2b\x36\x96\x59\
\xe7\xdb\x27\xbf\xc4\xa1\xc9\x27\xf9\xd8\xd6\x4f\x33\xba\x6a\x1b\
\xb1\x58\xd4\xdb\x57\x30\x18\xe4\x77\x7e\xe7\x53\x3c\xfc\xf0\x7f\
\xe4\xec\xd9\xb3\xfc\xe6\x6f\xfe\x36\xa9\x54\x8a\xcf\x7e\xf6\x0f\
\x29\x16\xe7\x1d\x41\xf1\x96\x63\x37\x24\x12\x6c\x47\x47\x34\x2c\
\xa5\xf9\xf4\xaf\xfc\x1f\x1c\x3b\x76\x18\x55\x55\x59\xb7\x6e\x1d\
\xc0\x4d\xbd\xd6\x25\xdf\x88\x0b\xe6\xc6\x79\x13\x89\x04\x03\x03\
\x03\xec\xdf\xbf\x9f\xfb\xef\xbf\x9f\x44\x32\xb6\xd8\x0b\xb1\xed\
\x56\xa2\xb2\x9b\x8d\xb4\xd3\xa0\xd0\xfd\x5e\xbd\x5e\xa7\x56\xab\
\x51\xa9\x55\x9d\xf7\x72\x8d\x4c\x36\xc5\xec\xec\x2c\x33\x33\x33\
\x24\x12\x09\x2c\xcb\x22\x3f\x3b\xc7\x33\xff\xeb\x3b\x0e\x51\x35\
\xfc\x3a\x77\x1d\xeb\x8e\xb7\xec\x04\x51\x40\x6f\x48\x52\x1d\x3a\
\x7c\x04\x27\x2d\xc3\x46\x14\x9d\xb5\x9b\x50\x20\xc0\xe6\x4d\x1b\
\xa0\xb1\x98\x7a\xbd\xed\xe7\xdd\xcf\x27\x27\x27\xd1\x2d\x93\x58\
\x2c\xe6\x35\x72\x74\xd6\x8a\x5a\xbf\xdb\x9c\x45\xb9\x88\xb8\xda\
\xc4\x33\x5f\xc9\xe3\xfa\x69\x43\x85\xcd\x61\x4e\x57\xcd\x3a\x18\
\x0c\x92\x4c\x26\x91\x03\x0b\x1a\x87\x6e\xff\xb1\xc6\x91\xbc\x63\
\xac\x5d\xbb\x96\x23\x87\x0e\xb7\x68\x29\x2e\xa5\xf4\xe1\x27\x65\
\xf8\xb8\x55\x11\x0c\x06\x19\x19\x19\xe1\x97\x7f\xf9\x97\xc9\x66\
\xb3\x7c\xe3\x1b\xdf\x70\x32\x79\x7b\x37\x71\xb2\x78\x08\xf3\xa8\
\x49\xc7\x68\x00\x39\x68\x21\x99\x22\x63\xb3\xa7\xf8\xef\xcf\x3c\
\xcc\x2f\xd4\x3f\xc3\x1d\xeb\x1e\x22\x91\x68\xb5\x97\x6b\xd6\xac\
\xe3\xe7\x7e\xee\x43\xfc\xe1\x1f\x7e\x96\x8f\x7f\xfc\xe7\xd9\xb1\
\x63\xe7\x32\xf6\xc1\xc6\xb6\xc1\xfd\xc8\xb2\x2c\x0c\xd3\x66\xf3\
\xa6\x1d\x5c\xb8\x70\x92\x5a\xad\x46\x20\x10\xc0\x30\x8c\x9b\xb6\
\x30\xf9\x86\x25\xf5\xbb\x4d\xd4\xba\xba\xba\x28\x97\xcb\x7c\xfb\
\x9f\xbe\xc5\xbb\xde\xf3\x6e\x3a\x3b\x3b\x17\x49\x90\xa8\xba\xd3\
\xc9\xd7\x7d\x55\x2a\xb5\xc6\x7b\x85\x4a\xb5\x4a\xa9\xf1\x6f\x27\
\x36\xdb\x7c\xa1\x04\x52\xf1\x38\x1b\x37\x6c\x66\x66\xea\x2a\x8a\
\x1c\xf0\x3c\x2b\x5d\xd7\x31\x45\xe7\x3b\x0e\x05\xda\x88\x2c\x84\
\xc8\xaa\xd5\x2a\xc1\x50\x08\xcb\x71\x3f\xa0\x21\xfa\x2b\x34\x42\
\x8e\xae\xd7\x61\x59\x16\x16\x36\x16\x76\x43\x05\x5d\x5c\x64\xf8\
\x5d\xa2\x71\x54\x33\xe0\xc2\xc4\x38\xa7\xcf\x9c\x5b\x48\x46\x11\
\x45\xe2\x11\xa7\x23\xb0\xa1\x5b\x2d\xbf\x5f\x0e\x2a\xde\x3e\xc4\
\xc6\x3a\x96\x60\x2f\x08\x03\xb7\x9e\x2b\xcb\xfb\xdd\x76\x9b\x87\
\xa8\x04\x24\x4f\xb1\x63\x41\x94\xb8\x35\x6c\xe7\xee\xd7\xf6\x92\
\x5e\x9c\xdf\x5a\x28\x14\x38\x73\xe6\x1c\x6f\x7b\xdb\xdb\x9c\x5a\
\x3a\x04\x4f\xb9\xbf\x99\x08\x9b\x23\x9d\xd9\xce\x2e\xb6\xdd\x23\
\x68\xe5\x0f\x00\x00\x1a\x26\x49\x44\x41\x54\xb6\x9d\xb9\x99\x59\
\xca\xe5\xb2\x53\xb2\x10\x6c\x78\x5d\x08\x0b\x0a\x4f\xaf\x51\x4c\
\xd3\x87\x8f\x9b\xd9\xeb\x8a\xc7\xe3\x6c\xd8\xb0\x81\x60\x30\xc8\
\xaa\x55\xab\xf8\xca\x57\xbe\xc2\xd8\xa1\xf3\x44\x7b\x73\xcc\x5a\
\x93\x68\xcf\xd7\xe8\xdc\x10\x24\x98\xb0\x91\x2c\x9b\xaa\x59\xe5\
\x2f\xf6\x7e\x1a\xdd\xac\xb1\x6b\xc3\x07\x48\x26\x62\xde\xf3\x23\
\x49\x22\x03\x03\x83\x7c\xe6\x33\x9f\xe5\xb1\xc7\xbe\xc0\xf1\xe3\
\xc7\xf8\xd0\x87\xfe\xed\x35\x9f\x2d\x6f\x42\x69\xd9\xd8\xb6\x45\
\x20\x10\x24\x14\x72\x92\x41\x7c\xe2\x5a\xe2\x82\xb9\x7a\x5d\x99\
\x4c\xc6\x6b\x24\xf8\xc4\x13\x4f\x78\x72\x43\x6e\xd7\x61\xc3\x30\
\x30\x0d\x1b\x5b\x14\x10\x2c\xc7\x50\xca\x02\xc4\x62\x4e\xf5\xb9\
\x0d\x5c\x99\x9a\xa2\x52\xa9\x34\xd4\xcf\x5b\x2f\xd2\x7c\xb9\x44\
\x2c\x16\x63\xe2\xc2\x79\x4f\x66\x6a\xc1\xc8\x3a\xe1\x30\x01\xb0\
\x1d\x6d\x25\x2f\x44\xa7\xe9\x0b\x4a\xf0\x16\x36\x34\x6a\xa7\x6c\
\x01\xaf\xab\x71\xb5\xe2\xf4\xa5\x32\x0c\x63\x59\x8f\xa2\xbd\x2e\
\x4b\x14\x64\xa7\x37\xb2\xb0\xa0\x66\x6f\xda\x36\xc5\x4a\x99\xe9\
\x99\x39\x22\x61\xa7\xaf\x96\x28\x4b\x4c\x5c\xbe\xc4\xfe\x6f\xff\
\x23\x1d\x1d\x1d\x64\x32\x19\x3a\xb3\x1d\x64\xb3\x59\x3a\xd2\x99\
\x26\x41\x5f\x7b\x09\x2f\x95\x45\x9a\x87\xa2\x2c\x2d\xea\xe9\x65\
\x61\x2f\x7b\x33\xbb\xd9\x95\xa5\x52\x89\xcb\x97\x2f\x33\x30\x34\
\x48\x47\xa3\xae\x6e\xc1\xdf\x75\x04\x89\x8b\xc5\x32\xf3\xf3\xf3\
\xcc\x15\x0a\x14\x0a\x05\x8a\xc5\x22\x85\x42\x81\x4d\x1b\x47\x39\
\x79\xf2\x24\x57\xf3\x53\x24\x93\x49\x82\xe1\x90\xa7\x06\x62\xb3\
\x58\xb1\xda\x87\x8f\x5b\x95\xbc\xd6\xaf\x5f\x4f\x2e\x97\x63\xc3\
\x86\x0d\x8c\x8d\x8d\x71\xf4\xe8\x51\xf6\x1d\xdf\xcf\x59\xe9\x14\
\x17\x9f\xcf\xd3\xb9\x26\x48\xac\x47\x46\x92\x6d\x2c\x4b\xe5\xaf\
\xf6\x7d\x06\xd5\xaa\x71\xff\xc6\x0f\x93\x48\xc4\x01\xe8\xec\xcc\
\x71\xf5\xea\x55\x7a\x7a\x7a\xf8\xed\xdf\xfe\x24\xdf\xfb\xde\x77\
\xf9\xc3\x3f\xfc\x2c\xbf\xf1\x1b\xbf\x49\x2c\x16\x6b\x22\x2a\x77\
\x42\xbd\x60\xb3\x42\x21\x85\xf1\x89\x33\xac\x5c\x31\xec\x65\x14\
\xde\xcc\xc5\xc8\x37\x74\xe4\x6e\xca\x75\x67\x67\xa7\xa7\x4e\x9e\
\xcf\xe7\x79\xe9\xe8\x8b\x54\x2a\x15\x4a\xa5\x92\x97\x50\x61\xb1\
\x58\xb9\xc2\xb2\x40\x37\x0c\x56\x8e\x0c\x73\xdb\xd6\xad\xec\x3f\
\x78\xb0\xe1\x49\x2c\xf6\x78\x54\xdd\x40\xd5\xb5\x45\xc6\x5a\x58\
\xe2\x46\x92\x24\x89\x90\x14\xc2\xb6\xac\x65\x0c\xab\x40\x5d\x55\
\x09\x04\x02\x4e\xf3\xcb\x72\x85\x44\x2c\xee\xf5\xce\xf2\x14\x33\
\xdc\xee\xbf\x56\x93\xa7\xb4\xec\x91\x69\x21\x38\x77\xfb\x6a\x5d\
\x63\xfc\xe2\x65\xc6\x2f\x5e\xf6\x42\x9a\xf1\x58\x8c\xe1\x15\x43\
\x8b\x67\x4d\xb6\xd0\xf0\xb6\x9c\xd0\x9c\x69\x3b\xde\xa0\x28\x05\
\xbc\xb5\xb2\xe6\x3a\x2c\x57\x92\xca\xdb\x4f\x83\x4c\x6d\x5b\xc0\
\xb2\x6c\xb4\xba\x4a\x3e\x9f\xe7\xcc\x99\x73\x3c\xf4\x9e\x77\x73\
\xe9\xca\x24\x73\xf3\x25\x0a\x33\xb3\xcc\xcf\xcf\x33\x3b\xef\x90\
\xd4\x72\xd1\xca\xe7\x0f\x1e\x62\xc7\xed\x77\x70\xfc\xa5\xa3\xa4\
\x52\x09\x42\x91\x46\xe1\x71\x40\x40\x74\xd5\xe9\x7d\xdb\xe5\xc3\
\x27\x2f\xc2\xe1\x30\x3d\x3d\x3d\xa4\x52\x29\xd6\xaf\x5f\xcf\x8e\
\x1d\x3b\xb8\xef\xc2\x7d\x9c\x38\x7d\x82\xaf\x1d\xf9\x3a\x53\x67\
\x4e\x51\x9d\xad\x93\x5d\xa3\x20\x2b\x22\x96\x69\xf1\x37\x7b\xfe\
\x1b\x35\xad\xcc\x7b\xb6\xff\x12\xa9\x64\x9c\x07\x1f\x7c\x27\x7f\
\xfe\xe7\x5f\xe2\x03\x1f\xf8\x59\xc6\xc7\xc7\x79\xe8\xa1\x77\xb2\
\x76\xed\x5a\x3e\xf7\xb9\xdf\xe7\xe7\x7f\xfe\x17\x00\x58\xbd\x7a\
\x4d\x13\x69\xb9\x5d\x28\x24\xc6\x2e\x9e\x61\x7a\x6a\x86\x81\xfe\
\x21\x2c\xcb\xf2\x1a\x57\xfa\xc4\x75\x0d\xf2\x0a\x06\x83\xa4\xd3\
\x69\x14\x45\x21\x9d\x4e\x53\x2e\x97\xa9\x56\xab\x9e\xfc\x52\x73\
\xe7\xce\xe6\x50\x97\x6e\x3a\x1e\xc1\xd8\xf8\x45\x46\x47\x47\x11\
\x2c\xdb\xd3\xd4\x6b\xf5\x46\x9c\xf4\x6c\x57\x51\xbd\x79\x3f\xb6\
\xb0\xd8\xdb\x90\x65\x19\x39\xa0\x60\x59\x86\x93\x51\xc8\xe2\xef\
\x1d\x3f\x7e\x9c\xcd\x5b\x36\x73\xea\xe5\xe3\x74\x74\x74\x10\x0a\
\x85\x1c\x5d\xbf\x60\x70\xa1\x00\xd9\x15\xb6\x35\x2d\xaf\x36\x6c\
\xb9\x35\x29\xcb\xb2\x5b\x6e\x64\x2f\x3d\xde\xf5\x06\x9b\x82\x81\
\xee\xf9\x30\x0c\xcb\x6b\x78\xe9\xea\x8d\x35\x77\x3f\xae\xd7\x1d\
\x8f\x70\x6a\x6a\x8a\x67\xf7\xee\xf1\x3c\x39\x11\x09\x59\x76\xd2\
\xdd\xc3\xc1\xc0\x92\xd9\x90\xba\xae\x33\x37\x5f\xe0\xd4\xa9\x53\
\x8c\x6e\xd8\xc0\xdf\x7d\xf3\xef\xd1\x4c\xa3\xd1\x2f\x4d\x68\x9d\
\x20\x78\x04\xdd\xb4\x1f\xc1\xa2\xaa\xaa\xe4\xa7\x67\x89\xc7\xe3\
\x5c\xba\x74\x85\x78\x7c\xa1\x8e\xeb\x8d\xaa\x48\xed\xc3\xc7\xbf\
\x04\xa6\xa7\xa7\xf9\xce\x77\xbe\xc3\x0b\x2f\xbc\xb0\x64\x5b\xa2\
\x85\x67\xdc\x60\x83\xb6\x1e\xb1\x5b\x46\x95\xa7\xb8\x74\x60\x8c\
\xdc\x86\x10\xc1\xb8\x84\x65\xda\x7c\x63\xff\x1f\x53\xd1\x8b\x7c\
\xf8\x2d\x8f\x92\xc9\x64\xf8\xe4\x27\xff\x13\xbf\xf3\x3b\xbf\xc9\
\xf1\xe3\xc7\x78\xf2\xc9\x27\xf8\xfd\xdf\xff\x1c\xbf\xf7\x7b\x9f\
\xe1\x91\x47\x7e\x0d\xc3\x30\xf8\xd2\x97\xfe\xb2\x31\xb9\x5d\xf0\
\xba\xea\x5a\x85\x8e\x74\x27\xb3\xf9\x39\xf6\xec\xd9\xc3\xe6\xcd\
\x9b\x19\x1c\x1c\xf4\xb3\x0a\x97\xba\x28\x6e\x51\x6e\xbb\xe1\x75\
\x45\x5e\x43\xa1\xd0\x2b\x18\x7a\x67\xfb\x40\x20\x40\x7e\x7a\xd6\
\xf9\xae\x28\x2c\x22\x18\x87\x1d\x45\x6a\xf5\xfa\x35\xf7\xd7\xf2\
\xa3\x65\x99\x50\x48\xa1\x5e\xaf\x13\x8f\xc7\x99\x2f\x97\x5b\x42\
\x68\xb6\x00\x75\x5d\xa7\x5a\x53\x49\xa6\xb3\x9c\x38\xf9\x32\xf5\
\x7a\xbd\xa5\x55\x87\xd3\x36\x7b\xa1\x6f\x57\xad\x56\xa3\x54\x29\
\x62\xda\x86\x47\x4e\x96\x6d\x2d\x19\x2e\x5b\x2e\xad\x5e\x10\x00\
\xdb\x46\xd5\x34\x82\x91\x30\x85\xf9\x79\x66\x67\x1d\x62\x70\x48\
\xd3\xe9\x9f\x65\x18\x96\xd3\xdc\xf1\xf2\x15\xca\xe5\x32\x73\xc5\
\x79\x4e\x9e\x3d\xd7\xe4\x59\x39\x2a\x1d\x8a\x24\xb3\x6d\xeb\xe6\
\x05\x82\x6d\xba\x2e\x6e\x88\x50\x94\x02\x9c\x1d\x1b\x47\x33\x4d\
\x8f\x40\xdb\xcf\xb1\x3b\x91\x10\x24\xb0\x6d\xcb\x99\x10\x58\x36\
\x88\x02\xa7\xce\x9e\xe5\x6d\xbb\xee\x62\xcf\xb3\xbb\x49\xa5\x52\
\x8d\xd6\x0d\xc1\x86\x52\xbf\xdd\x92\x64\xe2\x87\x0c\x7d\xdc\x0a\
\xa8\xd7\xeb\x7c\xf8\xc3\x1f\x26\x1a\x8d\xa2\xaa\xaa\x77\xdf\xb7\
\x77\x58\x68\x7e\xcf\x99\x19\x66\x6d\x11\x69\x6b\x04\x71\xba\xc8\
\x7c\xf1\x2a\xf1\xee\x00\x96\x29\xf2\x8f\xcf\xff\x15\x95\x7a\x91\
\x5f\x7a\xdb\x7f\x25\x1e\x8f\xd1\xd9\xd9\x09\x38\xb5\x62\xe1\x70\
\x18\xcb\xb2\xf8\xf0\x87\x3f\xc6\x99\x33\x67\x38\x7f\xfe\x1c\x2b\
\x56\xac\xf2\x9e\x75\x41\xb4\xd1\x35\x8d\x64\x22\xc5\xc6\x8d\x1b\
\x99\x9a\x9a\x62\xf7\xee\xdd\x6c\xdb\xb6\x8d\xd5\xab\x57\x93\x4c\
\x26\x7d\xe2\x6a\x26\x9d\x7a\xbd\xce\xdc\xdc\x1c\xd3\xd3\xd3\x54\
\x6a\x55\x6f\x2d\xcb\xb2\x0d\x6c\x4b\xf0\xde\xbd\x4c\xc2\xb6\x77\
\xcb\x36\x30\x74\x8b\x7c\x3e\x8f\x24\x49\xe8\x86\xb5\x88\x1c\x17\
\x42\x6f\x78\xa4\x75\x3d\xc4\xa5\x28\x0a\xf1\x68\x8c\xe3\x2f\xbe\
\xc4\x86\xcd\x9b\x38\xf0\xfc\x0b\x18\xb6\x85\x2c\x07\x3c\xef\xc2\
\xb2\x4c\x5e\x7c\xf1\x45\xd6\xae\x19\xa1\xbb\x73\x35\x17\xce\x9f\
\xe5\xe4\xc9\x93\x48\x92\x44\x57\x57\x57\x23\xfd\x5e\xf4\x0a\x8d\
\x35\x4d\x63\x6a\x3a\x4f\x5d\x33\x48\xb8\xe3\xb3\x6c\x6c\x9c\x2e\
\xc5\xb2\x20\x42\xcb\x98\x5b\x9b\x4a\xba\x6b\x57\x08\xa0\x1a\x3a\
\x95\x6a\x9d\xcd\xdb\xb6\xf2\xd2\x91\xa3\x5c\x99\x9c\xa2\xaf\xbf\
\xc7\xe9\xad\x83\xc0\xfc\xfc\x3c\x93\x93\x93\x04\x43\x11\xee\xbe\
\xe7\xad\x1c\x3a\x7c\xd4\x99\x65\xb9\x63\xb7\x6d\x27\xc5\xbf\xf1\
\x77\xbd\xa6\x71\xfe\xfc\x79\xa6\xa6\xa6\xc8\x66\xb3\xcc\xcf\xcf\
\x53\xab\xd5\x18\x9b\x18\x67\xf3\xd6\xed\xbc\x7c\xe8\xc8\x92\x13\
\x02\xc1\x86\x91\x91\x61\x56\x0c\x0c\xf2\xdc\x73\xcf\x51\xd3\x54\
\x2f\x37\xc6\x30\x0c\x27\x9d\xd6\xb2\x78\xea\xa9\xdd\xec\xd8\xb6\
\x95\xf1\xf1\xf1\x86\x7a\x46\xa4\x21\x43\x25\x5d\xf7\x3a\x97\x4f\
\x6c\x3e\xde\x2c\xf8\xf1\x8f\x7f\x8c\xa6\x69\x24\x93\x49\x3e\xf8\
\xc1\x0f\x7a\xed\x7d\xdc\x72\x11\xf7\xd9\x6f\x7e\xb7\x2c\x8b\x72\
\xb9\xcc\xb9\x89\x73\x7c\x2f\xf8\x7d\x72\x52\x37\x63\xe7\x4f\x11\
\xea\xd2\xb1\x14\x9b\xc7\x8f\xfc\xbf\x24\x23\x19\x3e\x72\xf7\x6f\
\xf3\xe9\x4f\xff\x1e\xb7\xdd\xb6\x93\x5d\xbb\xee\x69\x64\xf4\xc2\
\x5b\xdf\xfa\x76\xee\xbd\xf7\x6d\xd8\xb6\x85\x65\x39\xa4\x25\xc9\
\x22\x57\xa7\x2e\xb2\x62\x70\xd8\x6b\x6f\xd2\xd3\xd3\x43\x3c\x1e\
\x67\xef\xde\xbd\xcc\xcf\xcf\xb3\x71\xe3\xc6\x25\x13\xe6\x6e\x39\
\xe2\x72\xdd\xdf\x42\xa1\xc0\xfe\x03\x07\xe9\xec\xec\x74\x52\xd2\
\x05\x09\x31\x20\x22\x22\xd3\xda\x28\xd2\x79\x77\x9a\x12\x0a\xde\
\xbb\xfb\xff\xe9\xce\x1c\x95\x4a\x85\xc3\x2f\xbe\xb8\xa4\x71\xb5\
\x6d\x1b\xdb\x30\x09\x35\x42\x62\xd7\x1a\x97\x6b\x40\x5d\x91\x59\
\x55\x55\x31\xb4\x3a\x5b\x37\x6e\x20\x14\x8d\xf0\xbd\xef\x3f\x41\
\x2a\x9b\x41\x14\x45\x7a\xbb\x7b\xc8\x65\xd2\x5e\xd8\xb0\xa3\xa3\
\x83\xbe\xbe\x3e\x8f\x98\x35\x4d\xa3\x5a\x75\xc2\x83\xf5\x7a\x9d\
\xae\x9e\x6e\xfa\x06\x86\x88\xc5\x93\xec\x3d\x70\x00\x80\x74\x22\
\xc1\xe0\x40\x1f\x22\x16\x97\x2e\x5d\x42\xd7\x55\x04\x21\xbc\x30\
\xce\xa6\x31\x35\xd1\x05\xb6\x00\x2f\x1e\x3f\x4e\x47\x3a\xc3\xd6\
\x1d\x3b\x09\x05\x83\x8d\x73\x3a\x8b\x6d\xea\x58\x86\x49\x28\x1c\
\x25\x99\xc9\xb2\xfb\x99\x3d\xe8\x96\xe9\x64\x01\x36\x7a\x9b\x89\
\x6d\x72\x54\xb9\x5c\x8e\x9e\xee\x1c\x92\x24\x31\x3e\x3e\xce\xd0\
\xd0\x10\xb1\x58\x8c\x74\xa6\x83\x67\xf6\xee\x41\x54\x42\x08\x82\
\xec\x29\x97\x34\xd8\x8f\x81\xbe\x3e\x4c\xb5\xce\xd1\x17\x0e\xd2\
\xdd\xd5\xc9\xf4\xec\x0c\x85\xb9\x22\x92\x24\x33\xd8\xd7\xc7\x8a\
\xfe\x01\xc0\xe2\xc2\x85\x0b\x14\x4a\x45\x0c\xcb\xe4\xe2\xe5\x4b\
\x44\xa3\xf1\x46\x03\xd1\xd0\x75\x17\x6d\xfb\xa4\xe5\xe3\xcd\x82\
\x43\x87\x0e\x61\x9a\x26\xc3\xc3\xc3\x6c\xd9\xb2\xc5\x23\xad\xe6\
\x97\x4b\x60\xcd\x02\xe3\xb6\x6d\xb3\xa5\xb2\x85\xed\x63\xdb\xf9\
\xea\xb3\x7f\xcb\xba\x1d\x71\xe6\xc6\x2e\x31\x53\x1c\x43\x8e\x8a\
\x7c\xf3\xd9\x3f\x63\xcb\xd0\xdd\x6c\x1c\xbc\x8b\x07\x1f\x7c\xa8\
\x25\x61\xcc\x7d\xb9\xa4\x25\x88\x02\x53\xf9\x4b\x0c\xf6\xaf\xc2\
\x6e\xc8\xcb\xb9\xc7\xd2\x74\x9d\x7b\xee\xb9\x87\x63\xc7\x8e\xf1\
\xf4\xd3\x4f\xb3\x7d\xfb\x76\x06\x07\x07\x6f\xaa\x0c\xc3\x1b\x4a\
\x5c\xd1\x68\x94\x13\xa7\x4e\x33\x57\x28\x78\x49\x03\x4b\xbd\xbb\
\x6b\x57\xee\x7b\xf3\xe7\x26\x4d\x2d\xef\x85\x25\x8c\x5d\xe3\x62\
\x84\x42\xa1\x46\xf8\x4e\x5c\xde\x30\x36\x2e\xae\x2b\x00\x9c\x4e\
\xa7\x51\xe4\x80\xa3\xdd\x25\x8b\xa8\xaa\x8a\x69\x9a\x88\x36\x1e\
\x11\x0a\x58\x84\x42\x21\x14\x45\x69\x21\x3f\xb7\xc3\xa8\x20\x08\
\x64\xb3\x59\x5e\x3a\x7e\x8c\xf9\x52\x05\x51\x09\x7a\xe3\x8d\x46\
\x22\x48\x82\x8d\x60\x0b\x84\x94\x20\xf3\xa5\x22\x98\x09\x44\x51\
\x24\x12\x89\x60\x9a\xfa\xc2\x1a\x9b\x6d\x7b\x21\x3d\x37\x34\x37\
\x3b\x3b\xcb\xec\xec\x0c\x8e\x6a\xa0\x4d\xa5\x52\xe2\x1d\xf7\xbf\
\x9d\x72\xb1\x84\x1c\x08\x72\xf8\xc8\x11\x42\xe1\x30\x48\x12\xb6\
\x69\x39\x84\xe5\x66\x16\x5a\x36\xc9\x64\x9c\x42\xa1\x80\x8d\x49\
\xad\x56\x43\x92\x24\x2a\x95\x8a\xb7\x3e\x66\x23\x52\x29\x95\x89\
\xa5\x14\x24\xaf\x86\xcc\x39\x47\x8a\x24\x33\xbc\x6a\x05\x7f\xf7\
\xf5\xaf\xf1\xc1\x0f\xfc\x2c\xd5\x7a\x8d\x68\x34\xcc\x95\x4b\xfb\
\x89\x44\xe3\xd8\x96\x85\x2c\xb9\xc9\x22\x36\x27\x4e\x9c\xe0\xa1\
\x77\xdc\xcf\xb3\xcf\x3e\x4b\x22\x96\x6c\x74\x5e\x75\x6a\x59\x02\
\x01\xa5\x71\x6f\x58\x4b\x7a\xcc\xcd\x22\xa3\x3e\x7c\xdc\xec\xd0\
\x1b\x3d\xf0\x9a\x5b\x99\x2c\xf7\x6a\x27\x30\x57\xaf\xf5\xd7\x13\
\xbf\xc6\xf7\xf6\x3d\xc1\xd1\x15\x7b\x59\x21\xae\xe0\xf0\xc9\xfd\
\x68\xc1\x3a\x9f\xfd\xfb\x5f\xe5\x2f\x7e\xed\x87\x24\xc5\x8e\x86\
\x41\x5c\x48\xc2\x70\xc3\x83\x36\x50\xab\xcd\x93\x49\x75\x78\xeb\
\x5c\x34\xba\x66\xd4\x6a\x35\xfe\xf4\xbf\x3f\x42\xcd\xec\xe5\xbf\
\xfc\x97\xff\x93\xe9\xe9\x3c\x4f\x3f\xfd\x34\x3b\x77\xee\x64\xe5\
\xca\x95\x44\xa3\xd1\x5b\x97\xb8\x4c\xd3\xa4\xa6\xd6\xb1\x45\x87\
\xdd\xad\xa6\x34\xf3\xa5\xde\x71\xd7\xae\xc4\xc5\xdf\x73\xea\x80\
\x16\x12\xb4\xdb\x62\x92\x74\x77\xe6\x98\x9e\x9e\x26\x12\x89\x34\
\x66\xf9\xaf\xec\xf2\x4a\xa2\x48\x38\x18\x42\x14\x17\x7a\x4a\xd9\
\xb6\x8d\x69\x18\x4e\xf7\x61\xb1\x55\xae\xc9\xb2\x0d\xaf\xf0\x56\
\x51\x14\xca\x95\x1a\x95\x4a\x85\x74\x26\x89\x24\xcb\xa8\xaa\xca\
\x86\x0d\x1b\x78\x6a\xf7\xb3\xc8\x92\x8c\x2c\x2f\x1e\xaf\x6d\xdb\
\xd8\xa6\x85\x6d\x19\x74\x77\x77\x33\x36\x36\xc6\xa5\x89\x31\xfa\
\xbb\xbb\x98\x29\xcc\x31\x57\x28\x12\x08\x05\x1d\x2d\x47\x1b\x7a\
\x7b\xba\x08\x2b\x8e\xa7\x35\x33\x33\x47\xb1\x58\x64\xe5\x8a\x01\
\xaf\x16\x2d\x95\x8c\x33\xd4\xdf\x87\x2d\x0a\x8c\x8d\x4d\xd0\xd9\
\x99\x25\x99\x4c\x3a\x02\xb8\x86\xd3\x28\x33\x28\x89\x5c\x1c\xbf\
\x40\x4f\xd7\x42\x65\xbd\x9b\x0c\xe3\x30\x86\x85\x6d\x98\xa4\x23\
\x61\x12\xe9\x94\x17\xb2\xa8\xd5\x9c\x36\xe3\x4f\x3e\xfe\x04\xdb\
\xb7\x6e\x73\x66\x6b\xb6\x45\x2c\x12\x66\xd5\x8a\x41\x10\x45\x2e\
\x5d\x9a\x40\x16\x9d\xec\xc6\x73\xe7\xce\x11\x0a\x85\xd8\xbb\x77\
\x2f\xb9\x5c\x8e\xc9\xc9\xcb\xcc\xcc\xe4\x11\x45\x91\xad\x5b\xb7\
\xd2\xdd\xdd\xcd\x33\xcf\x3c\xe3\x15\x2a\x87\x42\x21\xea\xf5\xba\
\x17\xff\xdf\xb1\x63\x87\x97\x79\xea\x27\x75\xf8\xb8\xd9\xe1\x35\
\xa1\xf5\x34\x4c\x17\x5e\xed\x64\xd6\x4e\x5c\x6e\x3f\xc1\xe1\xe1\
\x61\xfe\x4d\xec\x83\xac\x3c\x3a\xc4\x3f\x9c\xfd\xff\x78\xe8\x81\
\xf7\x71\xf0\xe0\x3e\xae\x16\xc6\xf8\x93\xef\xfc\x67\xfe\xf3\xcf\
\x7d\xd9\x29\x3b\x69\x27\x2d\xdb\x42\x90\x1c\x65\xf9\x50\x30\x8a\
\xae\x1b\x8d\x63\x3a\x6b\xe8\x3f\x78\xf2\x07\x8c\x4f\x54\xd8\xba\
\xce\x22\x12\x89\xd0\xdf\x3f\x40\x32\x99\x62\xdf\xbe\xbd\x14\x8b\
\x45\xb6\x6d\xdb\x46\x38\x1c\xbe\xf5\x88\xab\x39\x44\x65\xdb\x8d\
\x75\x16\x2f\x7b\xae\x21\xa2\xeb\x46\xca\xae\xf3\xfd\x5a\x37\xc8\
\x9a\xd5\xc3\x3c\xf9\xc4\xf7\x19\x59\x35\x4c\x38\x1c\x6e\x71\x77\
\xdb\xb7\x6f\x2e\x08\x56\x14\x85\x60\xd8\x15\x9c\xb4\x10\x11\x30\
\x4d\x9d\x5a\xb5\x8c\x26\x05\x90\x10\x5a\x42\x67\x9e\xb1\x17\x05\
\x5e\x7c\xf1\x45\x8a\x95\x32\x82\x20\x70\xfb\x8e\xed\x44\xc3\x11\
\x54\x55\x65\xd3\xa6\x4d\x1c\x7d\xf1\x18\x72\x2c\xd0\xb2\x9d\xd0\
\x20\x9b\xde\xde\x5e\xa2\xd1\x28\xfd\xfd\xfd\xac\x58\xb1\xc2\xab\
\x11\xeb\xee\xee\xe6\xea\xd5\xab\x9c\x3a\x7f\x96\x0d\x1b\x36\x90\
\x4b\x67\xa9\xd5\x2b\xde\x31\x57\xae\x5c\xc9\xc4\xc4\x04\x3d\xbd\
\x5d\x14\x8b\x05\xc0\xf1\x0e\x7b\x7a\xbb\x88\x46\xa3\x44\x82\x21\
\x3a\x3b\x3b\xd1\x0d\xd5\xf3\xd8\xba\xbb\x3a\x09\x04\x02\x9c\x3b\
\x7b\x9a\xde\xee\x1e\x6f\x3c\xcd\x02\x9b\xb6\x6d\xb3\x72\xd5\x10\
\xeb\xd6\xac\x47\x55\x55\xef\xfa\x48\x62\x0e\x45\x51\xb8\x70\xe6\
\x34\x23\x23\x23\x94\x4a\x4e\x6f\xb4\x7a\xb5\x46\x4f\x57\x37\xc1\
\x70\x88\xce\x6c\xd6\x4b\xa7\x35\x0d\x9b\x7c\x3e\xcf\xc6\x8d\x1b\
\x31\x4d\x13\x53\x77\x12\x54\x82\xc1\x20\xa7\x4f\x9f\x66\x6c\x6c\
\x8c\x7a\xbd\xee\x89\xfa\x1a\x86\xe1\x65\x9b\x2a\x8a\xc2\x73\xcf\
\x3d\xc7\xae\x5d\xbb\xbc\x45\x67\x1f\x3e\x6e\x76\xe2\x6a\xae\xf9\
\x5c\x8e\xb4\xda\x09\xac\x3d\xf2\xd0\xdf\xdf\x4f\x2c\x16\xa3\x2f\
\xd7\xcb\x57\xf7\x7c\x8d\xdb\x76\xdd\x43\xe1\xd2\x08\xbb\x0f\xff\
\x80\xff\xf8\x8e\x31\x3a\x13\x43\x0e\x61\xd9\x36\xb6\x65\x79\x69\
\xee\x13\x97\xce\x93\x4e\xe5\xd0\xf5\x85\x8c\x6d\x47\x3f\x55\xe4\
\xbd\xef\x79\x1f\x82\x00\xf7\xdd\x77\x3f\xc1\xc6\x12\x44\x32\x99\
\xe4\x8e\x3b\xee\xe4\xe8\xd1\x23\x6c\xd9\xb2\xe5\xa6\x38\xc7\xe2\
\x8d\xbc\x70\xaf\x1b\x04\x6b\x21\x79\xa3\x41\x82\x82\x0d\x9d\x99\
\x2c\x6a\xb5\x46\x2a\x91\x24\x9d\x4e\x13\x8d\x46\x5b\xc4\x6d\xdd\
\x8b\xda\x32\x36\xc1\xf1\xe2\x64\x25\x40\x24\x12\xc3\x69\xb7\xe8\
\x24\x93\x7c\xfc\xa3\x1f\xe1\x3d\xef\x7a\x88\x77\xbe\xe3\x01\xba\
\x72\x1d\x0b\x87\x6f\x52\xb2\x70\x89\x2f\x1c\x8e\x92\x48\xa6\x79\
\xe9\xf8\xcb\x84\xa3\x4e\xdf\xb1\x68\x34\x4a\xb5\x5a\xc5\x34\x4d\
\xaf\x29\xe4\x52\xd2\x4d\xd5\x6a\x15\x70\x54\x2b\xc2\xe1\x30\xba\
\x56\x67\x60\xb0\x0f\xbd\x56\xa5\x2b\x9b\x41\xd3\xeb\x18\x86\x41\
\x3e\x9f\x67\xbe\x54\xc2\xb0\x2d\x6a\x9a\xea\x34\xab\x0c\x47\x3c\
\x52\x50\xeb\x3a\xa5\x4a\xcd\x53\x0a\x99\x9d\x9d\x25\x9f\xcf\xa3\
\xeb\x2a\xb6\x6d\x52\xad\xd7\xd8\xb4\x69\x93\xd7\xdd\x59\xb0\xc1\
\x32\xcc\x96\x71\x01\x18\xa6\xc9\x5c\xa1\xc0\xd4\xd4\x94\x13\x52\
\x14\xa1\x5e\xab\xb0\x79\xf3\x66\xca\xe5\xb2\x77\x9e\x14\x25\xe4\
\x84\x19\xab\x2a\x6a\x4d\xf3\xf6\x29\x60\x71\xdb\xf6\xad\x68\x75\
\x15\x59\x94\xb0\x2c\x03\x59\x16\xbd\x84\x19\xa7\x16\xcc\xf6\x7e\
\xbb\x20\x08\x5e\x5f\x20\x4d\xd3\xe8\xea\xea\x62\xef\xde\xbd\xd4\
\x6a\x35\xdf\xea\xf9\x78\x53\x11\x58\xbb\x57\xb5\x9c\x07\xe6\xca\
\xb6\xb9\x1d\x28\xdc\x7f\x77\x74\x74\xb0\x65\xf3\x16\x7e\xf5\xfe\
\x5f\x41\x3c\x97\x43\x48\xa7\xf9\xf9\x8f\xfd\x32\xff\x73\xcf\x5f\
\x03\x76\x23\xe9\xcd\xc0\x30\x4d\x44\x51\x64\xb6\x30\x89\x2c\xc5\
\x50\x55\x0d\x55\xd5\xd1\x34\x03\x5d\x37\xd0\x0d\x47\xb3\x55\x14\
\x45\xde\xff\xbe\x9f\x21\x1e\x8b\x7b\xe2\xdc\xa2\x28\x72\xe8\xd0\
\x0b\xbc\xe5\x2d\x77\xdc\x34\xe1\xfa\x1b\x42\x5c\x0b\x3f\xde\x72\
\x8a\x61\x5d\xe2\x79\xad\xfb\xc5\x72\x5e\x36\x84\x15\x85\x2d\x9b\
\x37\xf2\xcc\x33\xcf\x78\x9d\x77\xa3\xd1\x68\xc3\x0b\x58\x28\x2c\
\x16\x85\xd6\x75\xa3\xe6\x9b\xc7\x4d\xcd\x07\xb0\xcc\x46\xea\xbe\
\xaa\xa1\x6b\x75\x2f\x4e\xbd\x68\x0c\x6d\xfb\x51\x75\xcd\xb9\x29\
\x6d\xa7\x61\xa6\xa1\xab\x8d\xcc\x1e\x6b\x99\x73\xe2\xdc\xd0\x13\
\x13\x13\x9c\x3a\x75\x8a\xd9\xd9\x59\xe7\x33\xcb\x26\x11\x8b\x3b\
\x99\x88\xa6\x81\x22\x3b\xe2\xb7\x13\x13\x13\xfc\xd3\x3f\xfd\x13\
\xa7\x4e\x9d\x62\xf7\xee\xdd\x9e\x47\x59\x28\x14\xd8\xbd\x7b\x37\
\x7b\xf6\xec\xe1\xd8\xb1\x63\xbc\xfc\xf2\xcb\xc8\xb2\x4c\x34\x1a\
\xf5\xea\xcd\x00\x4f\xde\xc5\x85\xbb\xbd\x4b\x24\x27\x4e\x9c\xe0\
\xd0\xa1\x43\x08\x82\x40\x34\x1a\xf5\xda\x9d\xb8\xdb\x9e\x3a\x75\
\x6a\xa1\xc8\x5b\x55\xd9\xbf\x7f\x3f\x33\x33\x33\x1e\xf9\xda\xb6\
\x4d\x32\x99\xf4\xb6\xa9\x54\x2a\xec\xde\xfd\x2c\xfb\xf6\x1d\x20\
\x18\x0c\x7a\x9f\xbb\xea\x29\x96\x65\x31\x31\x31\x41\xbd\x5e\xf7\
\xba\x5c\x0b\x82\xc0\xdc\xdc\x9c\xb7\x4f\x1f\x3e\xde\x2c\x68\x27\
\xad\xa5\x3c\x30\xb7\x9b\x84\xfb\x6a\x26\x2f\x57\xb8\x61\x74\x74\
\x94\x7f\x73\xff\xcf\x71\x9b\xfd\x56\xf6\xee\xbd\xc8\x98\x3a\xcd\
\x3f\x1f\xfe\x47\x24\x59\xf2\x42\xf0\xaa\x5e\x26\x3f\x35\x43\x50\
\x91\xa9\xd7\x75\xea\xaa\x86\xaa\x6a\xcc\xcc\xcd\xf1\x7f\xfd\xd1\
\xff\x4d\x7e\x66\x1a\x5d\x37\x1a\x09\x1c\xb6\x47\xac\x87\x8f\xbc\
\xc0\xe8\xe8\x28\x1d\x1d\x1d\x37\xcd\x79\xbd\xa1\x5a\x85\x5e\xfd\
\x42\x43\xac\xf5\x35\xb3\x6c\x83\x34\x92\xc9\x24\x3b\x77\x6c\xe7\
\x89\xef\x7d\x9f\x81\xfe\x5e\x7a\x7a\x7a\xc8\x66\xb3\x58\x96\xb5\
\x90\x7a\x8a\x85\x65\x8b\x8d\x82\x5a\x67\x5b\x49\x92\xc0\xb2\x51\
\x14\xc5\x6b\x55\xe2\xb6\x51\x89\x44\xe3\x3c\xf3\xcc\x33\x9e\xda\
\xc7\xaa\x55\xab\x5e\x61\xf6\x61\x81\x6d\xd2\x91\xc9\x7a\x24\x57\
\xad\x56\x9d\x19\x8c\xdd\xa4\x5e\x61\x8b\x2c\xac\x75\x39\x7d\xb5\
\x42\xa1\x08\xe3\xe3\xe3\xec\xdc\xb9\xd3\x91\xb2\x6a\x0a\x2d\xe4\
\xf3\x79\x3a\x3a\x3a\xa8\xd5\x6a\xa4\x93\x29\x72\xb9\x1c\x3b\x6f\
\xbb\x8d\xbd\x7b\xf7\x52\x2a\x95\x5a\x46\x10\x0c\x06\xc9\x65\x3b\
\xe8\xeb\xed\x26\x93\xc9\xa0\xaa\xaa\xd7\x13\x4b\xd3\x1c\x8f\xc8\
\xed\x0e\xed\xaa\x66\x68\x86\xde\x44\xc2\x16\x5b\x37\x6f\x62\xd5\
\xaa\x11\xea\xf5\xba\xd7\xdd\xd9\xfd\x3d\x5e\xff\x31\x77\x0d\xd2\
\x86\xa1\xa1\x21\xa6\xa7\xa7\x18\x1a\x1a\xf2\x1e\xcc\x54\x2a\xe5\
\x79\x4b\x73\x73\x73\x08\x92\xc8\xe6\xad\x5b\x28\x95\x4a\xc8\xa2\
\xb4\xd0\x76\x05\x88\x44\x22\xe4\xf3\x79\x1e\x78\xe0\x01\xce\x9f\
\x3f\xef\x91\x9f\x69\x9a\xcb\x4e\x16\x7c\xf8\xb8\xd9\xc8\xaa\x39\
\x54\xd8\xfc\x7a\x25\x0f\xac\x39\x6c\xd8\xbc\x76\xa5\x28\x0a\x2b\
\x57\xae\x24\x9b\xcd\xb2\x7d\x7a\x5b\x23\xab\xb9\xca\xfe\xfd\xcf\
\xb2\x6d\xdb\x76\x02\x72\x80\x97\x5e\x3c\xcd\xd4\x54\x9e\x73\xe7\
\xce\x72\xd7\x5d\xf7\x52\xae\xa8\x98\xa6\xc5\xd7\xfe\x9f\x3f\xa2\
\x70\xfe\x79\xfe\xe6\xab\x22\x8f\xfc\xfa\x6f\x38\xa1\x43\x59\x02\
\x44\xc6\xc6\xce\x23\x89\x01\x56\xae\x5c\x75\x53\xb5\x24\xba\x21\
\xea\xf0\x92\x24\x11\x8f\xc6\x38\x71\xfc\x24\x77\xdd\xf9\x16\x6a\
\xb5\x5a\xa3\x20\xd7\xa9\xdb\x12\x1a\xc6\xdc\x46\xfc\xa9\xfe\x36\
\x4d\x1b\xcb\x72\x94\xcc\x1f\xff\xde\x3f\xd3\xdb\xdd\xc3\xd0\xd0\
\x10\x3d\x3d\x3d\x24\x93\x49\x34\x4d\x63\x64\xd5\x30\x85\x99\x59\
\x76\xdd\x75\xf7\xc2\x1a\x97\xed\x84\x03\x63\xb1\x04\xfb\x9e\x7b\
\x96\x6c\x47\xda\x5b\x0f\x6b\xbe\x50\xaa\xaa\xb2\x6a\xd5\x2a\xe6\
\xe7\xe7\x29\x95\x4a\x24\x12\x09\xcf\x80\xbb\xe9\xaa\x2e\x61\xf4\
\x66\x7b\x01\x67\xdd\x4a\xad\xd5\x11\x45\x91\xc2\xcc\x0c\xdb\x36\
\x6f\xa1\xa7\xbf\x0f\x59\x96\x99\xbc\x74\x79\xc9\x99\x97\xbb\x00\
\x9b\xcb\xe5\x98\x98\x98\x68\xe9\x8a\x7c\xe2\xd8\x71\x54\x5d\x63\
\xdd\xba\x75\x84\xc3\x61\xd2\x8a\x42\xbd\x5a\x63\xd5\x8a\x95\xcc\
\xce\xce\x2e\x08\xf2\x8a\x22\xc3\xc3\xc3\x48\x92\xd4\x42\x5a\x4f\
\x3d\xf5\x14\x86\x61\xf0\xd0\x43\x0f\xa1\x69\xda\xe2\x0b\xde\x58\
\x97\x72\xf7\xd3\xdf\xdf\x4f\xbd\x5e\x27\x12\x89\xb0\x67\xcf\x1e\
\x26\x27\x27\xf9\xe0\x07\x3f\x88\xaa\xaa\xde\x04\xa4\xd9\x7b\x8c\
\xc5\x62\x74\x75\x75\x51\xad\x56\x9d\x6e\xd5\x40\xb9\x5c\x26\x91\
\x48\x50\xab\xd5\x88\xc5\x62\xac\x1e\x59\x8b\xa6\x69\x64\xd3\x19\
\xd4\x5a\xbd\xa5\x98\xdc\x25\xb1\x44\x22\xb1\xa8\x8b\xf4\xeb\x1a\
\x5e\xf6\xe1\xe3\x5f\x09\xee\x04\xcc\x59\x32\x58\xa8\x63\x6c\xff\
\xf7\x72\x1e\x58\x3b\x71\x35\x3f\x27\x6e\xef\x3b\xd3\x74\x12\xb0\
\x26\x27\x27\x39\x7c\xf8\x10\x91\x70\x98\x74\x3a\xc3\xf1\xe3\x27\
\x78\xe2\x89\x27\xd8\xb7\x77\x1f\x9f\xf8\xf5\x47\xd0\x0c\x8b\xce\
\xae\x75\xd4\xa7\xa6\xf9\xd8\x47\xfe\x3d\x9a\x66\x60\x07\x9c\x25\
\x94\x62\xb5\xc2\x99\xb3\x67\x78\xfb\xdb\xde\x8e\xa2\x28\x08\x82\
\x70\xd3\x74\x46\xbe\x21\x1e\x97\x2c\xcb\x24\x93\x49\x86\x47\x56\
\x72\x60\xdf\x1e\x0a\x85\x02\x9a\xa6\x61\x99\x2c\x5b\x70\x7c\x3d\
\xef\x01\x39\x48\x34\x16\x26\x16\x8b\xb1\x7e\xed\x3a\x72\xb9\x1c\
\xb9\x5c\x8e\x4c\x26\xe3\xa5\xac\x77\x75\x75\x91\x7f\xe9\x25\x5e\
\x3e\x79\x9c\x6a\xb9\x82\x6e\x1a\x0e\x71\x89\x36\x41\x25\xcc\xc0\
\x60\x1f\xdd\xdd\xdd\xc4\x12\x4e\x58\xae\x19\xe9\x74\x9a\xd1\xd1\
\x51\x0c\xc3\x60\x62\x62\x62\xc1\xa8\x1a\xa6\xd7\xe7\x4b\x37\x0d\
\xb6\x6d\xdd\x8c\x20\x39\x51\xd6\x5a\xad\x86\xd0\xf8\xcd\x7b\xf7\
\x3c\x4b\x22\x91\xe0\xd4\xe9\x97\x01\x58\xb5\x62\x25\xf1\x58\x04\
\x5b\x00\xd3\x76\x88\xd7\x6e\xb0\xa9\xa2\x28\x9e\xe1\x77\x0d\x76\
\x3a\x9b\x61\x74\x74\x94\xab\x57\x26\xa9\xa9\x75\xaa\xd5\x2a\xd9\
\x6c\xd6\xfb\xfe\xf9\xf3\xe7\x19\x1c\x1c\x44\x14\x45\x62\xb1\x98\
\x57\x54\xa8\x19\xba\xa7\x41\xb8\x7a\xf5\x5a\xb2\xd9\x34\xd5\x6a\
\x75\xc9\xa2\x5e\xf7\xa1\x12\x04\x01\x0b\x11\x55\x37\x08\x48\x4e\
\x1f\xb3\x9e\x9e\x1e\x36\x6d\xda\xe4\xe9\x47\x82\x53\x9d\x5f\x9a\
\x77\x7a\x93\x85\xc3\x61\x54\x55\x5d\x90\xbc\x6a\x60\x66\x66\xa6\
\x91\x46\x2b\x92\x4c\xa6\xbd\xc4\x0b\xd3\x34\xb1\x05\x8b\x62\xb1\
\xd8\xb2\xd6\xf7\x6a\xb3\x07\x6f\x35\x5e\xf3\xab\x03\x6e\x4e\x48\
\x92\x44\xbd\x5e\xe7\xd4\xa9\x53\x9c\x3f\x7f\x9e\x70\x38\xdc\x42\
\x54\xae\x67\xd3\x4c\x58\x4b\x79\x63\x0b\x99\x82\xad\xef\xee\xcb\
\x2d\x3d\x32\x74\x83\x97\xc7\x4e\xf2\xf4\xd3\x4f\x53\xaf\xd7\x99\
\x99\x99\x61\x6c\x6c\x8c\x8e\x8e\x0e\x1e\x7a\xd7\xbb\x78\xdf\xfb\
\x7e\x96\xd2\x5b\xdf\x81\x28\xc9\xa8\xaa\x86\x6d\x07\x10\x45\x83\
\xbd\xfb\xf7\xf0\xd6\x7b\xde\x4a\x3c\x1e\xf7\x54\x8d\x6e\x96\xac\
\xde\x1b\xe2\x71\xc9\xb2\x4c\x3c\x1e\x67\x70\x70\x90\x58\x2c\x46\
\x69\xbe\x48\xbd\x5e\x5f\x52\xb3\xeb\xa7\x25\x44\x59\x96\x09\x06\
\x03\xc4\x62\x31\xe2\xf1\x24\xf1\x78\xdc\xd3\xc7\x93\x65\x99\x6c\
\x36\xcb\xe6\xcd\x9b\x19\x18\x18\xa0\x5a\xae\x38\xfd\xa2\xb0\xbd\
\xd9\x44\x3c\x1e\x27\x95\x49\x3b\x1e\x42\xa5\xda\x62\x80\xc3\xe1\
\x30\x1d\x1d\x1d\x04\x83\x41\x2e\x5d\xba\xe4\xfd\xbf\xa2\x28\x9e\
\xc7\x65\x99\x26\x56\x63\x91\x53\x09\x05\x89\xc7\xe3\x4c\x5e\xbe\
\xc2\xcb\x2f\xbf\xcc\xed\xb7\xdf\x4e\x47\x47\x87\x17\x9e\x9b\x9b\
\x9b\x6b\x19\xbb\x9b\x38\xe2\xde\x24\xee\x2c\xa7\xf9\x3b\x2e\x29\
\x45\xe3\x31\x8f\x18\xa3\xd1\x28\x27\x4f\xbc\xcc\x96\x2d\x5b\x98\
\x9b\x9b\x63\x60\x60\xc0\xfb\xff\x50\x28\xc4\x85\xf1\x31\x56\xac\
\x58\x41\xbd\x5a\x27\x9d\x4e\x13\x08\x04\x08\x85\x42\x2d\x6b\x48\
\xcd\xe1\xc5\xe6\x6b\x75\xe5\xca\x15\xd6\xaf\x5d\x47\xa5\x52\x21\
\x97\xcb\x11\x0c\x3a\x6d\x0f\x5c\x82\x0b\x06\x83\xd4\xeb\x75\xcf\
\xa3\x0d\x85\x42\xc4\xe3\x71\x8a\xc5\xe2\x42\x08\x57\x14\x39\x71\
\xe2\x04\xeb\xd7\x6f\x40\xd3\x34\x6f\xd6\x66\x59\x06\xc1\x60\x90\
\xb3\x67\x5f\xa6\xbf\xbf\x7f\xd1\xef\x6c\xd6\x9b\x6c\x27\xc3\xa5\
\xfa\x7a\xdd\x72\xfe\x58\xd3\x7a\xaa\x8f\x9b\x07\xbb\x76\xed\xe2\
\xab\x5f\xfd\x2a\xa1\x50\x88\xcf\x7f\xfe\xf3\x4b\x92\x52\xb3\xd7\
\xd5\x7c\x8d\xaf\x47\x69\xa6\x39\xf9\xcd\xb2\x2c\xaf\xcb\xc6\xec\
\xec\x2c\x17\x2f\x5e\xa4\xa3\xa3\x83\x87\x1f\x7e\x98\x9d\x3b\x77\
\x52\x98\x2f\x30\x9d\x3f\xca\xe8\x86\xcd\x94\x4b\xaa\x97\xb0\x76\
\xe2\xc4\x11\xb6\x6e\xde\x42\x26\x93\xb9\xe9\x48\xeb\x86\x11\x97\
\xd7\xd2\x24\x95\x76\x52\xc5\x3b\xd4\xd6\xfa\xa1\xd7\xb8\x6f\x47\
\x4e\x28\xe0\x69\x06\x36\xcf\x58\x1c\xad\xbc\x80\x67\x68\x9b\xc9\
\xd2\x0d\x53\xb9\xa4\x21\x22\x30\x3b\xeb\xe8\x20\x96\x4a\x25\x86\
\x86\x86\x3c\x22\x74\x43\x60\x86\x61\xa0\x9b\x06\xc5\x62\x11\x4d\
\xd3\x30\x4d\xc7\xfb\x32\x0c\xc3\x13\xf6\x1d\x18\x18\xe0\x81\x07\
\x1e\x20\x9b\xcd\x7a\x37\x80\x65\x59\x3c\xff\xfc\xf3\x94\x4a\x25\
\x0c\xc3\xa0\x52\x2a\x23\xd8\x8e\xc7\x53\x2e\x97\xe9\xed\xed\x75\
\xda\xb6\xd8\xb6\xa7\x94\x2f\x8a\x22\x13\x13\x13\x2d\x37\xb3\x61\
\x18\x9c\x3e\x7d\x9a\xf5\x1b\x46\xe9\xeb\xeb\xc3\x30\x0c\x8e\x1d\
\x3b\xd6\xe2\xc1\xc4\xa3\x31\x2e\x8e\x4f\x10\x0a\x85\x1a\x75\x5f\
\x33\x64\x32\x8e\x02\x88\xeb\x21\xb9\xbf\x45\x14\x45\xa6\xa6\xa6\
\x00\x27\x91\xa2\x3b\xd7\xc5\xb9\x73\xe7\x1a\x05\xd1\x26\x73\x73\
\x73\xc4\x62\x31\x02\x81\x80\x57\xfd\xdf\xdf\xdf\xcf\xd1\xa3\x47\
\xbd\x35\xb6\x52\xa9\x44\xb5\x5a\xf5\x3a\x4e\x6b\x9a\xc6\x8e\x1d\
\x3b\x98\x98\x98\xf0\xc2\x23\xb6\x6d\xa3\xaa\x2a\x96\x65\x71\xf7\
\xdd\x77\x73\xe6\xcc\x19\x4f\x23\xb1\xa7\xc7\x91\xaf\x32\x4d\xa7\
\x30\xba\x5c\x2e\x93\xc9\x64\x5a\xea\x47\x96\x7a\x70\x6f\x35\xe2\
\xb2\x9b\x5c\xcd\x36\xbd\x63\x9f\xd0\xde\xc0\x18\x1d\x1d\xe5\x81\
\x07\x1e\xe0\xc9\x27\x9f\xa4\x5e\xaf\xff\x8b\x1c\x53\x14\x45\x72\
\xb9\x1c\xbf\xf0\x0b\xbf\xc0\xae\x5d\xbb\x18\x19\x19\xa1\xab\xab\
\x8b\x7a\xbd\xce\xb9\x73\xe7\x38\xb0\xef\x39\xb6\x6d\xbf\x1d\xdd\
\xb0\xb8\x30\x76\x8e\x6c\x3a\xcd\xe0\xe0\x50\xc3\x11\x08\xde\x74\
\xf5\x93\x82\x7d\x03\x16\x16\x9a\x67\x03\xcd\x6e\xed\xeb\x71\xa8\
\xa5\x62\xc3\xed\x0f\xb2\x69\x9a\x5e\x2a\x76\x33\x71\xb5\x6f\xe7\
\x1a\xd7\x62\xb1\x88\x61\x18\x5e\xc7\x62\x51\x74\xb4\x07\xe7\xe7\
\xe7\xa9\x56\xab\x4e\xcd\x57\x30\x48\xad\x56\x73\x14\x27\x1a\xd9\
\x38\x6e\xdb\xfb\x60\x30\xd8\xa2\x1a\xef\x1a\x6e\x97\xa4\xdc\x8e\
\xa3\xae\x27\x63\x59\x96\xd7\x19\xd9\x25\x02\xb5\xd1\x46\xc5\xad\
\x6e\x77\x49\xd1\x9d\x0d\x35\x13\xaa\xaa\x3a\xa9\xf1\xaa\xaa\x7a\
\x64\x2d\x8a\x0b\x0a\xf9\x2e\xa9\x6b\x9a\xe6\xa9\x8a\x18\x86\x81\
\xaa\xaa\x5e\xe6\x52\xad\x56\xc3\xb6\x6d\xa7\x71\xa4\x28\x52\xab\
\xd5\xd0\x34\xad\x65\x9c\x6e\xfb\x71\x77\xfb\x6a\xb5\xea\x25\x61\
\x04\x02\x01\x2c\xcb\xf2\xc6\xdd\x4c\xc2\x95\x4a\xc5\xab\x29\x89\
\xc5\x62\x24\x93\x49\x04\x41\xa0\x54\x2a\xa1\xeb\x3a\x91\x48\xc4\
\xfb\x7e\xb1\x58\xf4\xd6\xe7\xe2\xf1\xf8\x35\x65\x67\x4c\xcb\xf2\
\xad\x62\xfb\xf3\x00\xcb\xf6\x87\xf3\xf1\xaf\x03\x55\x55\x39\x77\
\xee\x1c\x2f\xbd\xf4\x12\x67\xcf\x9e\x6d\xa9\x9d\xbc\x11\x84\x95\
\xc9\x64\xe8\xe8\xe8\x20\x9d\x4e\x33\x30\x30\x40\x2e\x97\xf3\x44\
\xcc\xc1\x29\xbb\xb9\x78\xf1\x22\x47\x8f\x1e\x21\xdb\x99\x63\x6e\
\x66\x86\xfb\xee\xbb\xcf\xb3\x29\x37\xe3\x3d\x73\x43\x88\xab\x9d\
\xc0\xda\xff\xfd\x7a\x90\xd7\x35\x67\xe6\x4b\x1c\xb7\xfd\x7b\xcd\
\x1e\x8b\xfb\x6a\xf6\x74\x9a\xe3\xca\xed\xdf\x6d\xde\x77\xbb\x70\
\x66\xfb\xe7\xed\xd2\x46\xee\xfb\xa2\x30\x58\x53\x06\x52\x33\xd1\
\x37\x1f\xa3\xfd\xf3\xe6\xfd\xb6\x37\x9d\x6c\xfe\xbb\xfd\xb3\xe6\
\x71\x35\x8f\xbb\x79\x0c\xcd\x0f\xc6\x72\xbf\x67\xa9\xf3\xdb\xfe\
\x9d\xe6\xc9\x45\xfb\x79\x76\x61\x35\x34\x16\x5f\x49\xf6\x49\xbf\
\x81\x06\x60\x49\xa3\x70\x93\x36\xc2\xf4\xbd\xb2\x7f\x7d\x68\x9a\
\x46\xa5\x52\xf1\x64\xe4\xdc\x35\xf2\x1b\x41\x5c\xee\x32\x84\xa2\
\x28\x5e\x66\x30\xe0\x75\xe5\x70\x27\xf3\xd3\xd3\xd3\x5c\xba\x74\
\x89\xa1\xa1\x21\xaf\x55\xd3\xcd\x7a\x7f\xdc\x50\xe2\xf2\xe1\xe3\
\xa7\x85\x61\x5a\x8b\x8a\xc6\x5f\x6f\xef\xc4\x2b\x0e\x47\x58\xf8\
\xc0\x5e\xb0\xf8\x82\x6f\xf0\x7d\xbc\xce\x13\x78\xd3\xb4\x10\xc5\
\xd7\x5f\x8f\xf3\x95\xf6\xd7\x4c\x5e\x2e\x81\x39\x6d\x9d\x42\x37\
\xf5\x3d\xee\x13\x97\x8f\x37\x1c\x71\x5d\x2b\x24\x28\x00\x82\x28\
\x38\x9a\x8d\x82\xcb\x35\xcd\x1e\xb8\xef\x69\xf8\xf0\xb1\x1c\x79\
\xb5\x0b\x2f\xf8\xc4\xe5\xe3\x75\x9d\xa1\xdd\x8a\xc6\xd7\xcd\x1c\
\x6c\xbf\x25\x85\x25\x66\x97\x3e\x31\xf9\xf0\x71\xeb\xc2\x27\xae\
\x37\xb0\x11\xf7\x8d\xb3\x0f\x1f\x3e\x7c\x2c\x86\xdf\x43\xe2\x8d\
\x3a\xa3\xf0\x49\xcb\x87\x0f\x1f\x3e\x7c\xe2\xf2\xe1\xc3\x87\x0f\
\x1f\x3e\x71\xf9\xf0\xe1\xc3\x87\x0f\x1f\x3e\x71\xf9\xf0\xe1\xc3\
\x87\x0f\x1f\x3e\x71\xf9\xf0\xe1\xc3\x87\x0f\x9f\xb8\x7c\xf8\xf0\
\xe1\xc3\xc7\x1b\x1f\xaf\x7b\x57\xfa\x7f\x65\xc8\x6f\xd8\x93\x0c\
\xd8\x8d\x42\x54\x1b\x47\x7e\x47\xb8\x49\x25\x78\x7c\xdc\xbc\xf0\
\xe4\xb7\x70\xda\xaa\x08\xc2\x42\xa9\xc2\x72\x7f\xf3\x26\xfc\xae\
\xfb\xdc\x09\x82\xe0\x2b\x8b\xdc\x84\x10\x04\x01\xd3\xb2\x90\x6e\
\xd2\xeb\xe6\x68\xde\xda\xe8\xa6\x81\x69\x5a\xfc\xff\xa7\x17\xd1\
\xa1\x4a\xc2\x65\xa4\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x0d\xa6\
\x2f\
\x2a\x20\x58\x50\x4d\x20\x2a\x2f\x0a\x73\x74\x61\x74\x69\x63\x20\
\x63\x68\x61\x72\x20\x2a\x69\x6e\x73\x74\x61\x6c\x6c\x2d\x75\x73\
\x62\x2d\x67\x75\x69\x2d\x31\x36\x5b\x5d\x20\x3d\x20\x7b\x0a\x2f\
\x2a\x20\x63\x6f\x6c\x75\x6d\x6e\x73\x20\x72\x6f\x77\x73\x20\x63\
\x6f\x6c\x6f\x72\x73\x20\x63\x68\x61\x72\x73\x2d\x70\x65\x72\x2d\
\x70\x69\x78\x65\x6c\x20\x2a\x2f\x0a\x22\x31\x36\x20\x31\x36\x20\
\x31\x37\x37\x20\x32\x22\x2c\x0a\x22\x20\x20\x20\x63\x20\x62\x6c\
\x61\x63\x6b\x22\x2c\x0a\x22\x2e\x20\x20\x63\x20\x67\x72\x61\x79\
\x35\x22\x2c\x0a\x22\x58\x20\x20\x63\x20\x23\x31\x39\x31\x39\x31\
\x39\x22\x2c\x0a\x22\x6f\x20\x20\x63\x20\x67\x72\x61\x79\x31\x31\
\x22\x2c\x0a\x22\x4f\x20\x20\x63\x20\x23\x31\x44\x31\x44\x31\x44\
\x22\x2c\x0a\x22\x2b\x20\x20\x63\x20\x23\x31\x45\x31\x45\x31\x45\
\x22\x2c\x0a\x22\x40\x20\x20\x63\x20\x67\x72\x61\x79\x31\x32\x22\
\x2c\x0a\x22\x23\x20\x20\x63\x20\x67\x72\x61\x79\x31\x33\x22\x2c\
\x0a\x22\x24\x20\x20\x63\x20\x23\x32\x33\x32\x33\x32\x33\x22\x2c\
\x0a\x22\x25\x20\x20\x63\x20\x23\x32\x35\x32\x35\x32\x35\x22\x2c\
\x0a\x22\x26\x20\x20\x63\x20\x67\x72\x61\x79\x31\x35\x22\x2c\x0a\
\x22\x2a\x20\x20\x63\x20\x23\x32\x38\x32\x38\x32\x38\x22\x2c\x0a\
\x22\x3d\x20\x20\x63\x20\x23\x32\x44\x32\x44\x32\x44\x22\x2c\x0a\
\x22\x2d\x20\x20\x63\x20\x67\x72\x61\x79\x31\x38\x22\x2c\x0a\x22\
\x3b\x20\x20\x63\x20\x67\x72\x61\x79\x31\x39\x22\x2c\x0a\x22\x3a\
\x20\x20\x63\x20\x23\x33\x31\x33\x31\x33\x31\x22\x2c\x0a\x22\x3e\
\x20\x20\x63\x20\x23\x33\x32\x33\x32\x33\x32\x22\x2c\x0a\x22\x2c\
\x20\x20\x63\x20\x67\x72\x61\x79\x32\x30\x22\x2c\x0a\x22\x3c\x20\
\x20\x63\x20\x67\x72\x61\x79\x32\x31\x22\x2c\x0a\x22\x31\x20\x20\
\x63\x20\x23\x33\x37\x33\x37\x33\x37\x22\x2c\x0a\x22\x32\x20\x20\
\x63\x20\x67\x72\x61\x79\x32\x32\x22\x2c\x0a\x22\x33\x20\x20\x63\
\x20\x23\x33\x39\x33\x39\x33\x39\x22\x2c\x0a\x22\x34\x20\x20\x63\
\x20\x23\x33\x41\x33\x41\x33\x41\x22\x2c\x0a\x22\x35\x20\x20\x63\
\x20\x23\x33\x38\x33\x44\x33\x41\x22\x2c\x0a\x22\x36\x20\x20\x63\
\x20\x67\x72\x61\x79\x32\x34\x22\x2c\x0a\x22\x37\x20\x20\x63\x20\
\x23\x32\x36\x34\x44\x30\x36\x22\x2c\x0a\x22\x38\x20\x20\x63\x20\
\x23\x32\x34\x35\x34\x30\x39\x22\x2c\x0a\x22\x39\x20\x20\x63\x20\
\x23\x32\x39\x35\x37\x31\x36\x22\x2c\x0a\x22\x30\x20\x20\x63\x20\
\x23\x32\x41\x34\x42\x32\x36\x22\x2c\x0a\x22\x71\x20\x20\x63\x20\
\x23\x33\x45\x34\x45\x33\x45\x22\x2c\x0a\x22\x77\x20\x20\x63\x20\
\x23\x33\x37\x35\x32\x33\x34\x22\x2c\x0a\x22\x65\x20\x20\x63\x20\
\x23\x33\x39\x35\x33\x33\x37\x22\x2c\x0a\x22\x72\x20\x20\x63\x20\
\x23\x32\x46\x36\x30\x30\x46\x22\x2c\x0a\x22\x74\x20\x20\x63\x20\
\x23\x33\x42\x37\x31\x31\x38\x22\x2c\x0a\x22\x79\x20\x20\x63\x20\
\x23\x34\x35\x36\x45\x32\x39\x22\x2c\x0a\x22\x75\x20\x20\x63\x20\
\x23\x34\x36\x37\x45\x32\x31\x22\x2c\x0a\x22\x69\x20\x20\x63\x20\
\x67\x72\x61\x79\x32\x35\x22\x2c\x0a\x22\x70\x20\x20\x63\x20\x23\
\x34\x31\x34\x31\x34\x31\x22\x2c\x0a\x22\x61\x20\x20\x63\x20\x67\
\x72\x61\x79\x32\x36\x22\x2c\x0a\x22\x73\x20\x20\x63\x20\x23\x34\
\x33\x34\x33\x34\x33\x22\x2c\x0a\x22\x64\x20\x20\x63\x20\x23\x34\
\x34\x34\x34\x34\x34\x22\x2c\x0a\x22\x66\x20\x20\x63\x20\x67\x72\
\x61\x79\x33\x31\x22\x2c\x0a\x22\x67\x20\x20\x63\x20\x23\x35\x33\
\x35\x33\x35\x34\x22\x2c\x0a\x22\x68\x20\x20\x63\x20\x23\x35\x42\
\x35\x43\x35\x41\x22\x2c\x0a\x22\x6a\x20\x20\x63\x20\x67\x72\x61\
\x79\x33\x36\x22\x2c\x0a\x22\x6b\x20\x20\x63\x20\x23\x35\x41\x36\
\x42\x35\x41\x22\x2c\x0a\x22\x6c\x20\x20\x63\x20\x23\x35\x30\x37\
\x32\x34\x41\x22\x2c\x0a\x22\x7a\x20\x20\x63\x20\x23\x36\x44\x34\
\x45\x34\x30\x22\x2c\x0a\x22\x78\x20\x20\x63\x20\x23\x37\x42\x35\
\x43\x35\x32\x22\x2c\x0a\x22\x63\x20\x20\x63\x20\x23\x36\x41\x36\
\x41\x36\x41\x22\x2c\x0a\x22\x76\x20\x20\x63\x20\x23\x36\x45\x37\
\x30\x36\x45\x22\x2c\x0a\x22\x62\x20\x20\x63\x20\x23\x36\x46\x37\
\x36\x37\x31\x22\x2c\x0a\x22\x6e\x20\x20\x63\x20\x67\x72\x61\x79\
\x34\x34\x22\x2c\x0a\x22\x6d\x20\x20\x63\x20\x23\x37\x32\x37\x32\
\x37\x32\x22\x2c\x0a\x22\x4d\x20\x20\x63\x20\x23\x37\x43\x37\x43\
\x37\x43\x22\x2c\x0a\x22\x4e\x20\x20\x63\x20\x23\x34\x37\x38\x34\
\x30\x38\x22\x2c\x0a\x22\x42\x20\x20\x63\x20\x23\x35\x41\x38\x39\
\x34\x42\x22\x2c\x0a\x22\x56\x20\x20\x63\x20\x23\x36\x43\x39\x35\
\x35\x31\x22\x2c\x0a\x22\x43\x20\x20\x63\x20\x23\x36\x43\x41\x31\
\x35\x42\x22\x2c\x0a\x22\x5a\x20\x20\x63\x20\x23\x37\x44\x41\x31\
\x37\x36\x22\x2c\x0a\x22\x41\x20\x20\x63\x20\x23\x38\x46\x37\x31\
\x36\x30\x22\x2c\x0a\x22\x53\x20\x20\x63\x20\x23\x38\x31\x42\x36\
\x36\x45\x22\x2c\x0a\x22\x44\x20\x20\x63\x20\x23\x41\x32\x39\x39\
\x36\x43\x22\x2c\x0a\x22\x46\x20\x20\x63\x20\x23\x38\x39\x43\x39\
\x36\x39\x22\x2c\x0a\x22\x47\x20\x20\x63\x20\x23\x39\x33\x44\x42\
\x36\x33\x22\x2c\x0a\x22\x48\x20\x20\x63\x20\x23\x39\x35\x44\x45\
\x36\x35\x22\x2c\x0a\x22\x4a\x20\x20\x63\x20\x23\x38\x43\x45\x32\
\x35\x31\x22\x2c\x0a\x22\x4b\x20\x20\x63\x20\x23\x39\x34\x45\x41\
\x35\x43\x22\x2c\x0a\x22\x4c\x20\x20\x63\x20\x23\x39\x45\x45\x42\
\x36\x45\x22\x2c\x0a\x22\x50\x20\x20\x63\x20\x23\x41\x32\x44\x42\
\x37\x44\x22\x2c\x0a\x22\x49\x20\x20\x63\x20\x23\x38\x30\x38\x30\
\x38\x32\x22\x2c\x0a\x22\x55\x20\x20\x63\x20\x23\x38\x34\x38\x34\
\x38\x34\x22\x2c\x0a\x22\x59\x20\x20\x63\x20\x23\x38\x34\x38\x36\
\x38\x37\x22\x2c\x0a\x22\x54\x20\x20\x63\x20\x23\x38\x38\x38\x38\
\x38\x38\x22\x2c\x0a\x22\x52\x20\x20\x63\x20\x23\x38\x41\x38\x42\
\x38\x41\x22\x2c\x0a\x22\x45\x20\x20\x63\x20\x23\x38\x46\x38\x42\
\x38\x38\x22\x2c\x0a\x22\x57\x20\x20\x63\x20\x23\x38\x43\x38\x43\
\x38\x42\x22\x2c\x0a\x22\x51\x20\x20\x63\x20\x67\x72\x61\x79\x35\
\x36\x22\x2c\x0a\x22\x21\x20\x20\x63\x20\x23\x38\x33\x39\x30\x38\
\x33\x22\x2c\x0a\x22\x7e\x20\x20\x63\x20\x23\x38\x46\x39\x30\x39\
\x36\x22\x2c\x0a\x22\x5e\x20\x20\x63\x20\x23\x39\x33\x39\x33\x38\
\x42\x22\x2c\x0a\x22\x2f\x20\x20\x63\x20\x23\x39\x33\x39\x33\x39\
\x33\x22\x2c\x0a\x22\x28\x20\x20\x63\x20\x23\x39\x33\x39\x35\x39\
\x36\x22\x2c\x0a\x22\x29\x20\x20\x63\x20\x23\x39\x35\x39\x36\x39\
\x44\x22\x2c\x0a\x22\x5f\x20\x20\x63\x20\x23\x39\x38\x39\x39\x41\
\x30\x22\x2c\x0a\x22\x60\x20\x20\x63\x20\x23\x39\x44\x39\x45\x41\
\x30\x22\x2c\x0a\x22\x27\x20\x20\x63\x20\x23\x39\x43\x39\x45\x41\
\x35\x22\x2c\x0a\x22\x5d\x20\x20\x63\x20\x23\x39\x45\x41\x30\x41\
\x37\x22\x2c\x0a\x22\x5b\x20\x20\x63\x20\x23\x41\x34\x39\x41\x41\
\x30\x22\x2c\x0a\x22\x7b\x20\x20\x63\x20\x23\x41\x30\x41\x30\x41\
\x30\x22\x2c\x0a\x22\x7d\x20\x20\x63\x20\x23\x41\x31\x41\x31\x41\
\x30\x22\x2c\x0a\x22\x7c\x20\x20\x63\x20\x23\x41\x30\x41\x35\x41\
\x35\x22\x2c\x0a\x22\x20\x2e\x20\x63\x20\x23\x41\x37\x41\x37\x41\
\x41\x22\x2c\x0a\x22\x2e\x2e\x20\x63\x20\x23\x41\x34\x41\x36\x41\
\x43\x22\x2c\x0a\x22\x58\x2e\x20\x63\x20\x23\x41\x34\x41\x38\x41\
\x35\x22\x2c\x0a\x22\x6f\x2e\x20\x63\x20\x23\x41\x39\x41\x39\x41\
\x42\x22\x2c\x0a\x22\x4f\x2e\x20\x63\x20\x23\x41\x41\x41\x41\x41\
\x41\x22\x2c\x0a\x22\x2b\x2e\x20\x63\x20\x23\x41\x39\x41\x41\x41\
\x43\x22\x2c\x0a\x22\x40\x2e\x20\x63\x20\x23\x41\x41\x41\x42\x41\
\x45\x22\x2c\x0a\x22\x23\x2e\x20\x63\x20\x23\x41\x43\x41\x43\x41\
\x43\x22\x2c\x0a\x22\x24\x2e\x20\x63\x20\x23\x41\x41\x41\x43\x42\
\x34\x22\x2c\x0a\x22\x25\x2e\x20\x63\x20\x23\x41\x41\x42\x44\x41\
\x33\x22\x2c\x0a\x22\x26\x2e\x20\x63\x20\x67\x72\x61\x79\x36\x39\
\x22\x2c\x0a\x22\x2a\x2e\x20\x63\x20\x23\x42\x31\x42\x31\x42\x31\
\x22\x2c\x0a\x22\x3d\x2e\x20\x63\x20\x23\x42\x32\x42\x32\x42\x32\
\x22\x2c\x0a\x22\x2d\x2e\x20\x63\x20\x23\x42\x33\x42\x33\x42\x36\
\x22\x2c\x0a\x22\x3b\x2e\x20\x63\x20\x23\x42\x33\x42\x37\x42\x37\
\x22\x2c\x0a\x22\x3a\x2e\x20\x63\x20\x67\x72\x61\x79\x37\x31\x22\
\x2c\x0a\x22\x3e\x2e\x20\x63\x20\x67\x72\x61\x79\x37\x32\x22\x2c\
\x0a\x22\x2c\x2e\x20\x63\x20\x23\x42\x39\x42\x39\x42\x39\x22\x2c\
\x0a\x22\x3c\x2e\x20\x63\x20\x67\x72\x61\x79\x37\x34\x22\x2c\x0a\
\x22\x31\x2e\x20\x63\x20\x67\x72\x61\x79\x22\x2c\x0a\x22\x32\x2e\
\x20\x63\x20\x23\x42\x42\x42\x43\x43\x34\x22\x2c\x0a\x22\x33\x2e\
\x20\x63\x20\x23\x42\x44\x42\x44\x43\x31\x22\x2c\x0a\x22\x34\x2e\
\x20\x63\x20\x23\x43\x32\x44\x42\x42\x33\x22\x2c\x0a\x22\x35\x2e\
\x20\x63\x20\x23\x43\x38\x45\x35\x42\x38\x22\x2c\x0a\x22\x36\x2e\
\x20\x63\x20\x23\x43\x31\x43\x31\x43\x34\x22\x2c\x0a\x22\x37\x2e\
\x20\x63\x20\x23\x43\x32\x43\x32\x43\x35\x22\x2c\x0a\x22\x38\x2e\
\x20\x63\x20\x23\x43\x33\x43\x33\x43\x36\x22\x2c\x0a\x22\x39\x2e\
\x20\x63\x20\x23\x43\x30\x43\x34\x43\x31\x22\x2c\x0a\x22\x30\x2e\
\x20\x63\x20\x23\x43\x30\x43\x35\x43\x34\x22\x2c\x0a\x22\x71\x2e\
\x20\x63\x20\x23\x43\x34\x43\x34\x43\x37\x22\x2c\x0a\x22\x77\x2e\
\x20\x63\x20\x23\x43\x36\x43\x36\x43\x36\x22\x2c\x0a\x22\x65\x2e\
\x20\x63\x20\x23\x43\x32\x43\x33\x43\x41\x22\x2c\x0a\x22\x72\x2e\
\x20\x63\x20\x23\x43\x36\x43\x36\x43\x39\x22\x2c\x0a\x22\x74\x2e\
\x20\x63\x20\x23\x43\x36\x43\x41\x43\x42\x22\x2c\x0a\x22\x79\x2e\
\x20\x63\x20\x23\x43\x41\x43\x41\x43\x41\x22\x2c\x0a\x22\x75\x2e\
\x20\x63\x20\x23\x43\x39\x43\x39\x43\x43\x22\x2c\x0a\x22\x69\x2e\
\x20\x63\x20\x23\x43\x42\x43\x42\x43\x45\x22\x2c\x0a\x22\x70\x2e\
\x20\x63\x20\x67\x72\x61\x79\x38\x30\x22\x2c\x0a\x22\x61\x2e\x20\
\x63\x20\x23\x43\x45\x43\x45\x43\x45\x22\x2c\x0a\x22\x73\x2e\x20\
\x63\x20\x23\x44\x30\x44\x30\x44\x30\x22\x2c\x0a\x22\x64\x2e\x20\
\x63\x20\x23\x44\x32\x44\x33\x44\x32\x22\x2c\x0a\x22\x66\x2e\x20\
\x63\x20\x4c\x69\x67\x68\x74\x47\x72\x61\x79\x22\x2c\x0a\x22\x67\
\x2e\x20\x63\x20\x23\x44\x32\x44\x32\x44\x35\x22\x2c\x0a\x22\x68\
\x2e\x20\x63\x20\x23\x44\x32\x44\x33\x44\x34\x22\x2c\x0a\x22\x6a\
\x2e\x20\x63\x20\x23\x44\x32\x44\x34\x44\x33\x22\x2c\x0a\x22\x6b\
\x2e\x20\x63\x20\x67\x72\x61\x79\x38\x33\x22\x2c\x0a\x22\x6c\x2e\
\x20\x63\x20\x23\x44\x34\x44\x34\x44\x37\x22\x2c\x0a\x22\x7a\x2e\
\x20\x63\x20\x23\x44\x35\x44\x35\x44\x39\x22\x2c\x0a\x22\x78\x2e\
\x20\x63\x20\x23\x44\x34\x44\x36\x44\x42\x22\x2c\x0a\x22\x63\x2e\
\x20\x63\x20\x23\x44\x35\x44\x36\x44\x42\x22\x2c\x0a\x22\x76\x2e\
\x20\x63\x20\x23\x44\x35\x44\x38\x44\x36\x22\x2c\x0a\x22\x62\x2e\
\x20\x63\x20\x67\x72\x61\x79\x38\x35\x22\x2c\x0a\x22\x6e\x2e\x20\
\x63\x20\x23\x44\x38\x44\x42\x44\x42\x22\x2c\x0a\x22\x6d\x2e\x20\
\x63\x20\x67\x72\x61\x79\x38\x36\x22\x2c\x0a\x22\x4d\x2e\x20\x63\
\x20\x23\x44\x38\x44\x38\x44\x44\x22\x2c\x0a\x22\x4e\x2e\x20\x63\
\x20\x23\x44\x41\x44\x38\x44\x44\x22\x2c\x0a\x22\x42\x2e\x20\x63\
\x20\x23\x44\x41\x44\x43\x44\x45\x22\x2c\x0a\x22\x56\x2e\x20\x63\
\x20\x23\x44\x41\x44\x45\x44\x43\x22\x2c\x0a\x22\x43\x2e\x20\x63\
\x20\x23\x44\x44\x44\x44\x44\x44\x22\x2c\x0a\x22\x5a\x2e\x20\x63\
\x20\x23\x44\x43\x44\x44\x44\x46\x22\x2c\x0a\x22\x41\x2e\x20\x63\
\x20\x23\x44\x42\x44\x43\x45\x30\x22\x2c\x0a\x22\x53\x2e\x20\x63\
\x20\x23\x44\x46\x45\x30\x45\x32\x22\x2c\x0a\x22\x44\x2e\x20\x63\
\x20\x23\x45\x31\x45\x31\x45\x31\x22\x2c\x0a\x22\x46\x2e\x20\x63\
\x20\x23\x45\x32\x45\x32\x45\x32\x22\x2c\x0a\x22\x47\x2e\x20\x63\
\x20\x23\x45\x30\x45\x31\x45\x36\x22\x2c\x0a\x22\x48\x2e\x20\x63\
\x20\x23\x45\x34\x45\x34\x45\x34\x22\x2c\x0a\x22\x4a\x2e\x20\x63\
\x20\x23\x45\x37\x45\x37\x45\x37\x22\x2c\x0a\x22\x4b\x2e\x20\x63\
\x20\x23\x45\x33\x45\x33\x45\x38\x22\x2c\x0a\x22\x4c\x2e\x20\x63\
\x20\x23\x45\x35\x45\x35\x45\x39\x22\x2c\x0a\x22\x50\x2e\x20\x63\
\x20\x23\x45\x36\x45\x36\x45\x41\x22\x2c\x0a\x22\x49\x2e\x20\x63\
\x20\x23\x45\x37\x45\x37\x45\x42\x22\x2c\x0a\x22\x55\x2e\x20\x63\
\x20\x23\x45\x37\x45\x37\x45\x43\x22\x2c\x0a\x22\x59\x2e\x20\x63\
\x20\x23\x45\x37\x45\x38\x45\x43\x22\x2c\x0a\x22\x54\x2e\x20\x63\
\x20\x67\x72\x61\x79\x39\x31\x22\x2c\x0a\x22\x52\x2e\x20\x63\x20\
\x23\x45\x39\x45\x39\x45\x39\x22\x2c\x0a\x22\x45\x2e\x20\x63\x20\
\x23\x45\x41\x45\x41\x45\x41\x22\x2c\x0a\x22\x57\x2e\x20\x63\x20\
\x23\x45\x38\x45\x38\x45\x43\x22\x2c\x0a\x22\x51\x2e\x20\x63\x20\
\x23\x45\x39\x45\x39\x45\x44\x22\x2c\x0a\x22\x21\x2e\x20\x63\x20\
\x23\x45\x42\x45\x42\x45\x46\x22\x2c\x0a\x22\x7e\x2e\x20\x63\x20\
\x23\x45\x43\x45\x43\x45\x43\x22\x2c\x0a\x22\x5e\x2e\x20\x63\x20\
\x67\x72\x61\x79\x39\x33\x22\x2c\x0a\x22\x2f\x2e\x20\x63\x20\x23\
\x45\x45\x45\x45\x45\x45\x22\x2c\x0a\x22\x28\x2e\x20\x63\x20\x23\
\x45\x43\x45\x43\x46\x30\x22\x2c\x0a\x22\x29\x2e\x20\x63\x20\x67\
\x72\x61\x79\x31\x30\x30\x22\x2c\x0a\x22\x5f\x2e\x20\x63\x20\x4e\
\x6f\x6e\x65\x22\x2c\x0a\x2f\x2a\x20\x70\x69\x78\x65\x6c\x73\x20\
\x2a\x2f\x0a\x22\x5f\x2e\x5f\x2e\x73\x20\x73\x20\x69\x20\x69\x20\
\x64\x20\x64\x20\x64\x20\x73\x20\x64\x20\x35\x20\x2c\x20\x5f\x2e\
\x5f\x2e\x5f\x2e\x22\x2c\x0a\x22\x5f\x2e\x33\x20\x51\x20\x3a\x2e\
\x77\x2e\x70\x2e\x70\x2e\x77\x2e\x3d\x2e\x26\x2e\x2a\x2e\x4d\x20\
\x2a\x20\x5f\x2e\x5f\x2e\x5f\x2e\x22\x2c\x0a\x22\x3d\x20\x54\x20\
\x28\x20\x2e\x2e\x5d\x20\x20\x2e\x3c\x2e\x23\x2e\x23\x2e\x26\x2e\
\x3d\x2e\x55\x20\x24\x20\x5f\x2e\x5f\x2e\x5f\x2e\x22\x2c\x0a\x22\
\x3d\x20\x59\x20\x5f\x20\x5d\x20\x32\x2e\x34\x2e\x5a\x20\x21\x20\
\x4f\x2e\x2d\x2e\x73\x2e\x2f\x20\x24\x20\x5f\x2e\x5f\x2e\x5f\x2e\
\x22\x2c\x0a\x22\x31\x20\x27\x20\x7e\x20\x7a\x20\x78\x20\x63\x2e\
\x48\x20\x4c\x20\x6c\x20\x73\x20\x64\x20\x3d\x20\x6f\x20\x5f\x2e\
\x5f\x2e\x5f\x2e\x22\x2c\x0a\x22\x67\x20\x29\x20\x5b\x20\x37\x20\
\x4e\x20\x4e\x2e\x41\x2e\x47\x20\x4b\x20\x43\x20\x35\x20\x40\x20\
\x3a\x20\x5f\x2e\x5f\x2e\x5f\x2e\x22\x2c\x0a\x22\x3d\x20\x24\x2e\
\x65\x2e\x41\x20\x44\x20\x33\x2e\x4e\x2e\x55\x2e\x34\x2e\x4a\x20\
\x46\x20\x71\x20\x3a\x20\x5f\x2e\x5f\x2e\x5f\x2e\x22\x2c\x0a\x22\
\x2e\x20\x62\x20\x3b\x2e\x74\x2e\x47\x2e\x2d\x2e\x20\x2e\x36\x2e\
\x72\x2e\x5a\x2e\x35\x2e\x50\x20\x65\x20\x3a\x20\x5f\x2e\x5f\x2e\
\x22\x2c\x0a\x22\x5f\x2e\x25\x20\x77\x20\x72\x20\x56\x20\x42\x2e\
\x21\x2e\x4f\x2e\x36\x2e\x7a\x2e\x21\x2e\x55\x2e\x76\x2e\x76\x20\
\x5f\x2e\x5f\x2e\x22\x2c\x0a\x22\x5f\x2e\x20\x20\x58\x20\x30\x20\
\x38\x20\x75\x20\x6e\x2e\x72\x2e\x71\x2e\x75\x2e\x49\x2e\x49\x2e\
\x5a\x2e\x68\x20\x3a\x20\x5f\x2e\x22\x2c\x0a\x22\x33\x20\x63\x20\
\x6e\x20\x6d\x20\x6b\x20\x39\x20\x74\x20\x5a\x2e\x49\x20\x6c\x2e\
\x69\x2e\x49\x2e\x64\x2e\x7d\x20\x3a\x20\x5f\x2e\x22\x2c\x0a\x22\
\x66\x20\x69\x2e\x6a\x2e\x62\x2e\x5a\x2e\x39\x2e\x42\x20\x79\x20\
\x57\x2e\x57\x2e\x49\x2e\x6a\x2e\x7e\x2e\x5e\x20\x2c\x2e\x6a\x20\
\x22\x2c\x0a\x22\x66\x20\x6a\x2e\x43\x2e\x44\x2e\x54\x2e\x54\x2e\
\x44\x2e\x5a\x20\x25\x2e\x49\x2e\x68\x2e\x63\x2e\x67\x2e\x44\x2e\
\x48\x2e\x51\x20\x22\x2c\x0a\x22\x64\x20\x6a\x2e\x6d\x2e\x46\x2e\
\x54\x2e\x7e\x2e\x2f\x2e\x54\x2e\x62\x2e\x31\x2e\x54\x20\x2b\x2e\
\x45\x20\x53\x2e\x7b\x20\x3a\x20\x22\x2c\x0a\x22\x40\x20\x3d\x20\
\x3d\x20\x3a\x20\x3a\x20\x3a\x20\x3a\x20\x3a\x20\x3a\x20\x3d\x20\
\x40\x20\x25\x20\x60\x20\x2b\x2e\x31\x20\x5f\x2e\x22\x2c\x0a\x22\
\x5f\x2e\x5f\x2e\x5f\x2e\x5f\x2e\x5f\x2e\x5f\x2e\x5f\x2e\x5f\x2e\
\x5f\x2e\x5f\x2e\x5f\x2e\x3a\x20\x3a\x20\x31\x20\x5f\x2e\x5f\x2e\
\x22\x0a\x7d\x3b\x0a\
"
qt_resource_name = "\
\x00\x09\
\x0c\x78\x54\x88\
\x00\x6e\
\x00\x65\x00\x77\x00\x50\x00\x72\x00\x65\x00\x66\x00\x69\x00\x78\
\x00\x07\
\x07\x0f\x38\x93\
\x00\x70\
\x00\x69\x00\x78\x00\x6d\x00\x61\x00\x70\x00\x73\
\x00\x1b\
\x0a\x47\x98\xc7\
\x00\x6c\
\x00\x69\x00\x76\x00\x65\x00\x75\x00\x73\x00\x62\x00\x2d\x00\x63\x00\x72\x00\x65\x00\x61\x00\x74\x00\x6f\x00\x72\x00\x2d\x00\x72\
\x00\x65\x00\x66\x00\x72\x00\x65\x00\x73\x00\x68\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x0e\xa6\x5a\xe7\
\x00\x6c\
\x00\x69\x00\x76\x00\x65\x00\x75\x00\x73\x00\x62\x00\x2d\x00\x63\x00\x72\x00\x65\x00\x61\x00\x74\x00\x6f\x00\x72\x00\x2d\x00\x68\
\x00\x65\x00\x61\x00\x64\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x02\x24\xfb\x0d\
\x00\x6c\
\x00\x69\x00\x76\x00\x65\x00\x75\x00\x73\x00\x62\x00\x2d\x00\x63\x00\x72\x00\x65\x00\x61\x00\x74\x00\x6f\x00\x72\x00\x2d\x00\x31\
\x00\x36\x00\x2e\x00\x78\x00\x70\x00\x6d\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x18\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\xa2\x00\x00\x00\x00\x00\x01\x00\x00\x3e\x4a\
\x00\x00\x00\x2c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x68\x00\x00\x00\x00\x00\x01\x00\x00\x03\x94\
"
def qInitResources():
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
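# The embedded resources are registered with Qt as soon as this module is imported.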
qInitResources()
| gpl-2.0 | 7,992,807,113,800,451,000 | 63.178404 | 129 | 0.726835 | false |
jsbitcoin/bpp-client | lib/deterministic.py | 1 | 4488 | from pybitcointools import *
import hmac, hashlib
### Electrum wallets
def electrum_stretch(seed): return slowsha(seed)
# Accepts seed or stretched seed, returns master public key
def electrum_mpk(seed):
if len(seed) == 32: seed = electrum_stretch(seed)
return privkey_to_pubkey(seed)[2:]
# Accepts (seed or stretched seed), index and secondary index
# (conventionally 0 for ordinary addresses, 1 for change), returns privkey
def electrum_privkey(seed,n,for_change=0):
if len(seed) == 32: seed = electrum_stretch(seed)
mpk = electrum_mpk(seed)
offset = dbl_sha256(str(n)+':'+str(for_change)+':'+mpk.decode('hex'))
return add_privkeys(seed, offset)
# Accepts (seed or stretched seed or master public key), index and secondary index
# (conventionally 0 for ordinary addresses, 1 for change), returns pubkey
def electrum_pubkey(masterkey,n,for_change=0):
if len(masterkey) == 32: mpk = electrum_mpk(electrum_stretch(masterkey))
elif len(masterkey) == 64: mpk = electrum_mpk(masterkey)
else: mpk = masterkey
bin_mpk = encode_pubkey(mpk,'bin_electrum')
offset = bin_dbl_sha256(str(n)+':'+str(for_change)+':'+bin_mpk)
return add_pubkeys('04'+mpk,privtopub(offset))
# seed/stretched seed/pubkey -> address (convenience method)
def electrum_address(masterkey,n,for_change=0,version=0):
return pubkey_to_address(electrum_pubkey(masterkey,n,for_change),version)
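# Illustrative usage of the Electrum helpers (values are placeholders, not real keys):
#   mpk  = electrum_mpk(seed)            # master public key from a (stretched) seed
#   addr = electrum_address(mpk, 0, 0)   # first receiving address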
# Below code ASSUMES binary inputs and compressed pubkeys
PRIVATE = '\x04\x88\xAD\xE4'
PUBLIC = '\x04\x88\xB2\x1E'
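# BIP32 mainnet version bytes: PRIVATE serializes to an 'xprv...' string, PUBLIC to 'xpub...'.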
# BIP32 child key derivation
def raw_bip32_ckd(rawtuple, i):
vbytes, depth, fingerprint, oldi, chaincode, key = rawtuple
i = int(i)
if i < 0: i = abs(i) + 2**31
if vbytes == PRIVATE:
priv = key
pub = privtopub(key)
else:
pub = key
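  # Indexes >= 2**31 request hardened derivation, which requires the private key (BIP32).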
if i >= 2**31:
if vbytes == PUBLIC:
raise Exception("Can't do private derivation on public key!")
I = hmac.new(chaincode,'\x00'+priv[:32]+encode(i,256,4),hashlib.sha512).digest()
else:
I = hmac.new(chaincode,pub+encode(i,256,4),hashlib.sha512).digest()
if vbytes == PRIVATE:
newkey = add_privkeys(I[:32]+'\x01',priv)
fingerprint = bin_hash160(privtopub(key))[:4]
if vbytes == PUBLIC:
newkey = add_pubkeys(compress(privtopub(I[:32])),key)
fingerprint = bin_hash160(key)[:4]
return (vbytes, depth + 1, fingerprint, i, I[32:], newkey)
def bip32_serialize(rawtuple):
vbytes, depth, fingerprint, i, chaincode, key = rawtuple
depth = chr(depth % 256)
i = encode(i,256,4)
chaincode = encode(hash_to_int(chaincode),256,32)
keydata = '\x00'+key[:-1] if vbytes == PRIVATE else key
bindata = vbytes + depth + fingerprint + i + chaincode + keydata
return changebase(bindata+bin_dbl_sha256(bindata)[:4],256,58)
def bip32_deserialize(data):
dbin = changebase(data,58,256)
if bin_dbl_sha256(dbin[:-4])[:4] != dbin[-4:]:
raise Exception("Invalid checksum")
vbytes = dbin[0:4]
depth = ord(dbin[4])
fingerprint = dbin[5:9]
i = decode(dbin[9:13],256)
chaincode = dbin[13:45]
key = dbin[46:78]+'\x01' if vbytes == PRIVATE else dbin[45:78]
return (vbytes, depth, fingerprint, i, chaincode, key)
def raw_bip32_privtopub(rawtuple):
vbytes, depth, fingerprint, i, chaincode, key = rawtuple
return (PUBLIC, depth, fingerprint, i, chaincode, privtopub(key))
def bip32_privtopub(data):
return bip32_serialize(raw_bip32_privtopub(bip32_deserialize(data)))
def bip32_ckd(data,i):
return bip32_serialize(raw_bip32_ckd(bip32_deserialize(data),i))
def bip32_master_key(seed):
I = hmac.new("Bitcoin seed",seed,hashlib.sha512).digest()
return bip32_serialize((PRIVATE, 0, '\x00'*4, 0, I[32:], I[:32]+'\x01'))
def bip32_bin_extract_key(data):
return bip32_deserialize(data)[-1]
def bip32_extract_key(data, fmt=None):
if fmt:
return encode_privkey(bip32_deserialize(data)[-1], fmt)
else:
return bip32_deserialize(data)[-1].encode('hex')
def subkey_for_path(data, path):
force_public = (path[-4:] == '.pub')
if force_public:
path = path[:-4]
key = data
if path:
invocations = path.split("/")
for v in invocations:
is_prime = v[-1] in ("'p")
if is_prime: v = v[:-1]
v = int(v) + (2**31 * is_prime)
key = bip32_ckd(key, v)
if force_public and key[0:4] == "xprv":
key = bip32_privtopub(key)
return key
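# Illustrative usage (not part of the original module; seed_bytes is any binary seed):
#   master = bip32_master_key(seed_bytes)            # -> 'xprv...' string
#   child  = subkey_for_path(master, "0'/1/2.pub")   # hardened 0, then 1, then 2, returned as public key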
| gpl-3.0 | -7,851,304,233,098,060,000 | 34.904 | 88 | 0.649287 | false |
shadowmint/nwidget | samples/snake/model/game.py | 1 | 1498 | # Copyright 2013 Douglas Linder
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pyglet
import cocos
import nwidget
import views
class Game(object):
def run(self, testing=False):
assets = nwidget.Assets()
if testing:
layer = views.TestView(assets)
else:
layer = views.MainView(assets)
scene = cocos.scene.Scene(layer)
cocos.director.director.run(scene)
@staticmethod
def menu():
assets = nwidget.Assets()
layer = views.MainView(assets)
scene = cocos.scene.Scene(layer)
cocos.director.director.run(scene)
@staticmethod
def play():
assets = nwidget.Assets()
layer = views.GameView(assets)
scene = cocos.scene.Scene(layer)
cocos.director.director.run(scene)
@staticmethod
def credits():
assets = nwidget.Assets()
layer = views.CreditsView(assets)
scene = cocos.scene.Scene(layer)
cocos.director.director.run(scene)
@staticmethod
def exit():
exit(0)
| apache-2.0 | 7,245,439,001,718,751,000 | 26.740741 | 74 | 0.712951 | false |
ElMejorEquipoDeLaSerena/VariableStarsClassification | final_program.py | 1 | 3159 | import numpy as np
import urllib
import matplotlib.pyplot as plt
import sys
import pylab as pl
from sklearn import svm, cross_validation
from sklearn.grid_search import GridSearchCV
# Load our matched data from the catalog
cat_data = 'catalog_data_final/matched_data.csv'
catalog = np.genfromtxt(cat_data, dtype=None, names=True, delimiter=',')
# Pull out certain features so we can work with them separately
data = catalog
classes, mag, v_mag, blend, amplitude, period, epoch_folding = data['Var_Type'], data['Mag'], data["V_mag"], data['Blend'], data['Amplitude'], data['Period_days'], data['epoch_folding']
# Grab our features and transpose them for use in the SVM
features = v_mag, amplitude, period
data_svm = np.array(features).transpose()
# How many of each Variable Type do we have
for x in range(1,4):
print("Class size: {} {}".format(x,len(classes[classes == x])))
# Figure out how many we need to train for accuracy
# Test size
N_test = 5000
clf = svm.LinearSVC()
# X_train, X_test, y_train, y_test = cross_validation.train_test_split (data_svm, classes, test_size=1./3.)
# print("training set = {} {}".format( X_train.shape, y_train.shape ))
# print("test size = {} {}".format(X_test.shape, y_test.shape))
# clf.fit(X_train, y_train)
# pred_class = clf.predict(X_test)
# N_match = (pred_class == y_test).sum()
# print("N_match = {}".format(N_match))
# acc = 1. * N_match / len(pred_class)
# print("Accuracy = {}".format(acc))
# ss = cross_validation.StratifiedShuffleSplit(classes, 5, test_size = 1./3.)
# scores = cross_validation.cross_val_score(clf, data_svm, classes, cv=ss)
# print("Accuracy = {} +- {}",format(scores.mean(),scores.std()))
# Training Sizes
Ns = 2**np.arange(2,12)
print("Ns = {}".format(Ns))
scores = np.zeros(len(Ns))
stds = np.zeros(len(Ns))
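# For each candidate training-set size, run 5 stratified shuffle-split folds and
# record the mean cross-validation accuracy and its spread.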
for i in range(len(Ns)):
N = Ns[i]
ss = cross_validation.StratifiedShuffleSplit(classes, 5, test_size = N_test, train_size = N)
scores_i = cross_validation.cross_val_score(clf, data_svm, classes, cv=ss)
scores[i] = scores_i.mean()
stds[i] = scores_i.std()
pl.clf()
fig = pl.figure()
ax = fig.add_subplot(1,1,1)
ax.errorbar (Ns, scores, yerr = stds)
ax.set_xscale("log")
ax.set_xlabel("N")
ax.set_ylabel("Accuracy")
pl.savefig('optimal_training_size.png')
scores.argmax()
optimal_n = 64
print("Optimal N = {}".format(optimal_n))
# Optimal C size
N_train = optimal_n
N_test = optimal_n
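# Log-spaced grid of the SVM regularization parameter C, from 1e-5 to 1e4.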
C_range = 10. ** np.arange(-5, 5)
param_grid = dict(C=C_range)
ss = cross_validation.StratifiedShuffleSplit(classes, 5, test_size = N_test, train_size = N_train)
grid = GridSearchCV(svm.LinearSVC(), param_grid=param_grid, cv=ss)
grid.fit (data_svm, classes)
print("The best classifier is: ".format(grid.best_estimator_))
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
score_dict = grid.grid_scores_
# We extract just the scores
scores = [x[1] for x in score_dict]
pl.clf()
fig = pl.figure()
ax = fig.add_subplot(1,1,1)
ax.plot (C_range, scores)
ax.set_xscale("log")
ax.set_xlabel("C")
ax.set_ylabel("Accuracy")
pl.savefig('optimal_c_size.png') | mit | -7,094,549,220,741,888,000 | 29.980392 | 185 | 0.690408 | false |
FrancoisRheaultUS/dipy | dipy/core/tests/test_rng.py | 8 | 1479 | """File dedicated to test ``dipy.core.rng`` module."""
from scipy.stats import chisquare
from dipy.core import rng
import numpy.testing as npt
def test_wichmann_hill2006():
n_generated = [rng.WichmannHill2006() for i in range(10000)]
    # chisquare returns the chi-squared test statistic and the p-value of the test
chisq, pvalue = chisquare(n_generated)
    # A p-value close to 1 means the generated values are consistent with a
    # uniform distribution (we fail to reject the null hypothesis), as expected here
npt.assert_almost_equal(pvalue, 1.0)
npt.assert_raises(ValueError, rng.WichmannHill2006, ix=0)
def test_wichmann_hill1982():
n_generated = [rng.WichmannHill1982() for i in range(10000)]
chisq, pvalue = chisquare(n_generated)
    # A p-value close to 1 means the generated values are consistent with a
    # uniform distribution (we fail to reject the null hypothesis), as expected here
npt.assert_almost_equal(pvalue, 1.0)
npt.assert_raises(ValueError, rng.WichmannHill1982, iz=0)
def test_LEcuyer():
n_generated = [rng.LEcuyer() for i in range(10000)]
chisq, pvalue = chisquare(n_generated)
    # A p-value close to 1 means the generated values are consistent with a
    # uniform distribution (we fail to reject the null hypothesis), as expected here
npt.assert_almost_equal(pvalue, 1.0)
npt.assert_raises(ValueError, rng.LEcuyer, s2=0)
if __name__ == "__main__":
test_LEcuyer()
test_wichmann_hill2006()
test_wichmann_hill1982()
| bsd-3-clause | -8,753,347,949,488,625,000 | 36.923077 | 75 | 0.713996 | false |
freakboy3742/django | tests/template_tests/syntax_tests/test_url.py | 14 | 12493 | from django.template import RequestContext, TemplateSyntaxError
from django.template.defaulttags import URLNode
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.urls import NoReverseMatch, resolve
from ..utils import setup
@override_settings(ROOT_URLCONF='template_tests.urls')
class UrlTagTests(SimpleTestCase):
request_factory = RequestFactory()
# Successes
@setup({'url01': '{% url "client" client.id %}'})
def test_url01(self):
output = self.engine.render_to_string('url01', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/')
@setup({'url02': '{% url "client_action" id=client.id action="update" %}'})
def test_url02(self):
output = self.engine.render_to_string('url02', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url02a': '{% url "client_action" client.id "update" %}'})
def test_url02a(self):
output = self.engine.render_to_string('url02a', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url02b': "{% url 'client_action' id=client.id action='update' %}"})
def test_url02b(self):
output = self.engine.render_to_string('url02b', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url02c': "{% url 'client_action' client.id 'update' %}"})
def test_url02c(self):
output = self.engine.render_to_string('url02c', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url03': '{% url "index" %}'})
def test_url03(self):
output = self.engine.render_to_string('url03')
self.assertEqual(output, '/')
@setup({'url04': '{% url "named.client" client.id %}'})
def test_url04(self):
output = self.engine.render_to_string('url04', {'client': {'id': 1}})
self.assertEqual(output, '/named-client/1/')
@setup({'url05': '{% url "метка_оператора" v %}'})
def test_url05(self):
output = self.engine.render_to_string('url05', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url06': '{% url "метка_оператора_2" tag=v %}'})
def test_url06(self):
output = self.engine.render_to_string('url06', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url08': '{% url "метка_оператора" v %}'})
def test_url08(self):
output = self.engine.render_to_string('url08', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url09': '{% url "метка_оператора_2" tag=v %}'})
def test_url09(self):
output = self.engine.render_to_string('url09', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url10': '{% url "client_action" id=client.id action="two words" %}'})
def test_url10(self):
output = self.engine.render_to_string('url10', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/two%20words/')
@setup({'url11': '{% url "client_action" id=client.id action="==" %}'})
def test_url11(self):
output = self.engine.render_to_string('url11', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/==/')
@setup({'url12': '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'})
def test_url12(self):
output = self.engine.render_to_string('url12', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/!$&'()*+,;=~:@,/')
@setup({'url13': '{% url "client_action" id=client.id action=arg|join:"-" %}'})
def test_url13(self):
output = self.engine.render_to_string('url13', {'client': {'id': 1}, 'arg': ['a', 'b']})
self.assertEqual(output, '/client/1/a-b/')
@setup({'url14': '{% url "client_action" client.id arg|join:"-" %}'})
def test_url14(self):
output = self.engine.render_to_string('url14', {'client': {'id': 1}, 'arg': ['a', 'b']})
self.assertEqual(output, '/client/1/a-b/')
@setup({'url15': '{% url "client_action" 12 "test" %}'})
def test_url15(self):
output = self.engine.render_to_string('url15')
self.assertEqual(output, '/client/12/test/')
@setup({'url18': '{% url "client" "1,2" %}'})
def test_url18(self):
output = self.engine.render_to_string('url18')
self.assertEqual(output, '/client/1,2/')
@setup({'url19': '{% url named_url client.id %}'})
def test_url19(self):
output = self.engine.render_to_string(
'url19', {'client': {'id': 1}, 'named_url': 'client'}
)
self.assertEqual(output, '/client/1/')
@setup({'url20': '{% url url_name_in_var client.id %}'})
def test_url20(self):
output = self.engine.render_to_string('url20', {'client': {'id': 1}, 'url_name_in_var': 'named.client'})
self.assertEqual(output, '/named-client/1/')
@setup({'url21': '{% autoescape off %}'
'{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'
'{% endautoescape %}'})
def test_url21(self):
output = self.engine.render_to_string('url21', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/!$&\'()*+,;=~:@,/')
# Failures
@setup({'url-fail01': '{% url %}'})
def test_url_fail01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail01')
@setup({'url-fail02': '{% url "no_such_view" %}'})
def test_url_fail02(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail02')
@setup({'url-fail03': '{% url "client" %}'})
def test_url_fail03(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail03')
@setup({'url-fail04': '{% url "view" id, %}'})
def test_url_fail04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail04')
@setup({'url-fail05': '{% url "view" id= %}'})
def test_url_fail05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail05')
@setup({'url-fail06': '{% url "view" a.id=id %}'})
def test_url_fail06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail06')
@setup({'url-fail07': '{% url "view" a.id!id %}'})
def test_url_fail07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail07')
@setup({'url-fail08': '{% url "view" id="unterminatedstring %}'})
def test_url_fail08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail08')
@setup({'url-fail09': '{% url "view" id=", %}'})
def test_url_fail09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail09')
@setup({'url-fail11': '{% url named_url %}'})
def test_url_fail11(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail11')
@setup({'url-fail12': '{% url named_url %}'})
def test_url_fail12(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail12', {'named_url': 'no_such_view'})
@setup({'url-fail13': '{% url named_url %}'})
def test_url_fail13(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail13', {'named_url': 'template_tests.views.client'})
@setup({'url-fail14': '{% url named_url id, %}'})
def test_url_fail14(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail14', {'named_url': 'view'})
@setup({'url-fail15': '{% url named_url id= %}'})
def test_url_fail15(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail15', {'named_url': 'view'})
@setup({'url-fail16': '{% url named_url a.id=id %}'})
def test_url_fail16(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail16', {'named_url': 'view'})
@setup({'url-fail17': '{% url named_url a.id!id %}'})
def test_url_fail17(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail17', {'named_url': 'view'})
@setup({'url-fail18': '{% url named_url id="unterminatedstring %}'})
def test_url_fail18(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail18', {'named_url': 'view'})
@setup({'url-fail19': '{% url named_url id=", %}'})
def test_url_fail19(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail19', {'named_url': 'view'})
# {% url ... as var %}
@setup({'url-asvar01': '{% url "index" as url %}'})
def test_url_asvar01(self):
output = self.engine.render_to_string('url-asvar01')
self.assertEqual(output, '')
@setup({'url-asvar02': '{% url "index" as url %}{{ url }}'})
def test_url_asvar02(self):
output = self.engine.render_to_string('url-asvar02')
self.assertEqual(output, '/')
@setup({'url-asvar03': '{% url "no_such_view" as url %}{{ url }}'})
def test_url_asvar03(self):
output = self.engine.render_to_string('url-asvar03')
self.assertEqual(output, '')
@setup({'url-namespace01': '{% url "app:named.client" 42 %}'})
def test_url_namespace01(self):
request = self.request_factory.get('/')
request.resolver_match = resolve('/ns1/')
template = self.engine.get_template('url-namespace01')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns1/named-client/42/')
@setup({'url-namespace02': '{% url "app:named.client" 42 %}'})
def test_url_namespace02(self):
request = self.request_factory.get('/')
request.resolver_match = resolve('/ns2/')
template = self.engine.get_template('url-namespace02')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
@setup({'url-namespace03': '{% url "app:named.client" 42 %}'})
def test_url_namespace03(self):
request = self.request_factory.get('/')
template = self.engine.get_template('url-namespace03')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
@setup({'url-namespace-no-current-app': '{% url "app:named.client" 42 %}'})
def test_url_namespace_no_current_app(self):
request = self.request_factory.get('/')
request.resolver_match = resolve('/ns1/')
request.current_app = None
template = self.engine.get_template('url-namespace-no-current-app')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
@setup({'url-namespace-explicit-current-app': '{% url "app:named.client" 42 %}'})
def test_url_namespace_explicit_current_app(self):
request = self.request_factory.get('/')
request.resolver_match = resolve('/ns1/')
request.current_app = 'app'
template = self.engine.get_template('url-namespace-explicit-current-app')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
class URLNodeTest(SimpleTestCase):
def test_repr(self):
url_node = URLNode(view_name='named-view', args=[], kwargs={}, asvar=None)
self.assertEqual(
repr(url_node),
"<URLNode view_name='named-view' args=[] kwargs={} as=None>",
)
url_node = URLNode(
view_name='named-view',
args=[1, 2],
kwargs={'action': 'update'},
asvar='my_url',
)
self.assertEqual(
repr(url_node),
"<URLNode view_name='named-view' args=[1, 2] "
"kwargs={'action': 'update'} as='my_url'>",
)
| bsd-3-clause | -6,685,569,834,926,489,000 | 41.289116 | 112 | 0.590767 | false |
emattiza/maya | tests/test_maya.py | 1 | 5853 | import pytest
import copy
from datetime import timedelta
import maya
from maya.core import _seconds_or_timedelta # import private function
def test_rfc2822():
r = maya.parse('February 21, 1994').rfc2822()
d = maya.MayaDT.from_rfc2822(r)
assert r == 'Mon, 21 Feb 1994 00:00:00 GMT'
assert r == d.rfc2822()
def test_iso8601():
r = maya.parse('February 21, 1994').iso8601()
d = maya.MayaDT.from_iso8601(r)
assert r == '1994-02-21T00:00:00Z'
assert r == d.iso8601()
def test_parse_iso8601():
string = '20161001T1430.4+05:30'
expected = '2016-10-01T09:00:00.400000Z'
d = maya.MayaDT.from_iso8601(string)
assert expected == d.iso8601()
string = '2016T14'
expected = '2016-01-01T14:00:00Z'
d = maya.MayaDT.from_iso8601(string)
assert expected == d.iso8601()
string = '2016-10T14'
expected = '2016-10-01T14:00:00Z'
d = maya.MayaDT.from_iso8601(string)
assert expected == d.iso8601()
string = '2012W05'
expected = '2012-01-30T00:00:00Z'
d = maya.MayaDT.from_iso8601(string)
assert expected == d.iso8601()
string = '2012W055'
expected = '2012-02-03T00:00:00Z'
d = maya.MayaDT.from_iso8601(string)
assert expected == d.iso8601()
string = '2012007'
expected = '2012-01-07T00:00:00Z'
d = maya.MayaDT.from_iso8601(string)
assert expected == d.iso8601()
string = '2016-W07T09'
expected = '2016-02-15T09:00:00Z'
d = maya.MayaDT.from_iso8601(string)
assert expected == d.iso8601()
def test_human_when():
r1 = maya.when('yesterday')
r2 = maya.when('today')
assert (r2.day - r1.day) in (1, -30, -29, -28, -27)
def test_machine_parse():
r1 = maya.parse('August 14, 2015')
assert r1.day == 14
r2 = maya.parse('August 15, 2015')
assert r2.day == 15
def test_dt_tz_translation():
d1 = maya.now().datetime()
d2 = maya.now().datetime(to_timezone='EST')
assert (d1.hour - d2.hour) % 24 == 5
def test_dt_tz_naive():
d1 = maya.now().datetime(naive=True)
assert d1.tzinfo is None
d2 = maya.now().datetime(to_timezone='EST', naive=True)
assert d2.tzinfo is None
assert (d1.hour - d2.hour) % 24 == 5
def test_random_date():
# Test properties for maya.when()
d1 = maya.when('11-17-11 08:09:10')
assert d1.year == 2011
assert d1.month == 11
assert d1.day == 17
assert d1.week == 46
assert d1.weekday == 4
assert d1.hour == 8
assert d1.minute == 9
assert d1.second == 10
assert d1.microsecond == 0
# Test properties for maya.parse()
d2 = maya.parse('February 29, 1992 13:12:34')
assert d2.year == 1992
assert d2.month == 2
assert d2.day == 29
assert d2.week == 9
assert d2.weekday == 6
assert d2.hour == 13
assert d2.minute == 12
assert d2.second == 34
assert d2.microsecond == 0
def test_print_date(capsys):
d = maya.when('11-17-11')
print(d)
out, err = capsys.readouterr()
assert out == 'Thu, 17 Nov 2011 00:00:00 GMT\n'
assert repr(d) == '<MayaDT epoch=1321488000.0>'
def test_invalid_date():
with pytest.raises(ValueError):
maya.when('another day')
def test_slang_date():
d = maya.when('tomorrow')
assert d.slang_date() == 'tomorrow'
def test_slang_time():
d = maya.when('one hour ago')
assert d.slang_time() == 'an hour ago'
def test_parse():
d = maya.parse('February 21, 1994')
assert format(d) == '1994-02-21 00:00:00+00:00'
d = maya.parse('01/05/2016')
assert format(d) == '2016-01-05 00:00:00+00:00'
d = maya.parse('01/05/2016', day_first=True)
assert format(d) == '2016-05-01 00:00:00+00:00'
def test_datetime_to_timezone():
dt = maya.when('2016-01-01').datetime(to_timezone='US/Eastern')
assert dt.tzinfo.zone == 'US/Eastern'
def test_rfc3339():
mdt = maya.when('2016-01-01')
out = mdt.rfc3339()
mdt2 = maya.MayaDT.from_rfc3339(out)
assert mdt.epoch == mdt2.epoch
def test_comparison_operations():
now = maya.now()
now_copy = copy.deepcopy(now)
tomorrow = maya.when('tomorrow')
assert (now == now_copy) is True
assert (now == tomorrow) is False
assert (now != now_copy) is False
assert (now != tomorrow) is True
assert (now < now_copy) is False
assert (now < tomorrow) is True
assert (now <= now_copy) is True
assert (now <= tomorrow) is True
assert (now > now_copy) is False
assert (now > tomorrow) is False
assert (now >= now_copy) is True
assert (now >= tomorrow) is False
# Check Exceptions
with pytest.raises(TypeError):
now == 1
with pytest.raises(TypeError):
now != 1
with pytest.raises(TypeError):
now < 1
with pytest.raises(TypeError):
now <= 1
with pytest.raises(TypeError):
now > 1
with pytest.raises(TypeError):
now >= 1
def test_seconds_or_timedelta():
# test for value in seconds
assert _seconds_or_timedelta(1234) == timedelta(0, 1234)
# test for value as `datetime.timedelta`
assert _seconds_or_timedelta(timedelta(0, 1234)) == timedelta(0, 1234)
# test for invalid value
with pytest.raises(TypeError):
_seconds_or_timedelta('invalid interval')
def test_intervals():
now = maya.now()
tomorrow = now.add(days=1)
assert len(list(maya.intervals(now, tomorrow, 60*60))) == 24
def test_dunder_add():
now = maya.now()
assert now + 1 == now.add(seconds=1)
assert now + timedelta(seconds=1) == now.add(seconds=1)
def test_dunder_radd():
now = maya.now()
assert now.add(seconds=1) == now + 1
assert now.add(seconds=1) == now + timedelta(seconds=1)
def test_dunder_sub():
now = maya.now()
assert now - 1 == now.subtract(seconds=1)
assert now - timedelta(seconds=1) == now.subtract(seconds=1)
| mit | 5,969,255,941,538,656,000 | 23.286307 | 74 | 0.619341 | false |
cpausmit/Kraken | filefi/029/mc.py | 1 | 2187 | # $Id: BAMBUProd_AODSIM.py,v 1.40 2012/07/25 03:08:41 paus Exp $
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILEFI')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.load('RecoVertex/PrimaryVertexProducer/OfflinePrimaryVertices_cfi')
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('Mit_029'),
annotation = cms.untracked.string('AODSIM'),
name = cms.untracked.string('BambuProduction')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
)
# input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/relval/CMSSW_5_3_2-START53_V6/RelValProdTTbar/AODSIM/v2/0000/9A630BD7-C3B9-E111-BAB3-00304867918E.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *",
"drop *_MEtoEDMConverter_*_*",
"drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'START53_V10::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences.BambuFillAODSIM_cfi")
process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
#process.output = cms.OutputModule("PoolOutputModule",
# outputCommands = cms.untracked.vstring('keep *'),
# fileName = cms.untracked.string ("test.root")
#)
process.bambu_step = cms.Path(process.BambuFillAODSIM)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
#process.outpath = cms.EndPath(process.output)
| mit | 7,757,890,347,132,781,000 | 36.706897 | 148 | 0.716964 | false |
electronick1/todoist-python | todoist/managers/biz_invitations.py | 1 | 1220 | # -*- coding: utf-8 -*-
from .generic import Manager
class BizInvitationsManager(Manager):
state_name = None # there is no local state associated
object_type = None # there is no object type associated
resource_type = None # there is no resource type associated
def accept(self, invitation_id, invitation_secret):
"""
Appends a request to the queue, to accept a business invitation to
share a project.
"""
cmd = {
'type': 'biz_accept_invitation',
'uuid': self.api.generate_uuid(),
'args': {
'invitation_id': invitation_id,
'invitation_secret': invitation_secret,
},
}
self.queue.append(cmd)
def reject(self, invitation_id, invitation_secret):
"""
Appends a request to the queue, to reject a business invitation to
share a project.
"""
cmd = {
'type': 'biz_reject_invitation',
'uuid': self.api.generate_uuid(),
'args': {
'invitation_id': invitation_id,
'invitation_secret': invitation_secret,
},
}
self.queue.append(cmd)
| mit | -8,422,246,199,426,850,000 | 30.282051 | 74 | 0.548361 | false |
googleads/googleads-python-lib | examples/ad_manager/v202011/inventory_service/get_all_ad_units.py | 1 | 1750 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad units.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
ad_unit_service = client.GetService('InventoryService', version='v202011')
# Create a statement to select ad units.
statement = ad_manager.StatementBuilder(version='v202011')
# Retrieve a small amount of ad units at a time, paging
# through until all ad units have been retrieved.
while True:
response = ad_unit_service.getAdUnitsByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for ad_unit in response['results']:
# Print out some information for each ad unit.
print('Ad unit with ID "%s" and name "%s" was found.\n' %
(ad_unit['id'], ad_unit['name']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| apache-2.0 | -7,039,397,890,105,975,000 | 34.714286 | 77 | 0.713714 | false |
mscuthbert/abjad | abjad/tools/scoretools/FixedDurationContainer.py | 1 | 3389 | # -*- encoding: utf-8 -*-
from abjad.tools import durationtools
from abjad.tools.scoretools.Container import Container
class FixedDurationContainer(Container):
r'''A fixed-duration container.
::
>>> container = scoretools.FixedDurationContainer(
... (3, 8), "c'8 d'8 e'8")
>>> show(container) # doctest: +SKIP
.. doctest::
>>> print(format(container))
{
c'8
d'8
e'8
}
Fixed-duration containers extend container behavior with format-time
checking against a user-specified target duration.
Returns fixed-duration container.
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Containers'
__slots__ = (
'_target_duration',
)
### INITIALIZER ###
def __init__(self, target_duration=None, music=None, **kwargs):
Container.__init__(self, music=music, **kwargs)
target_duration = target_duration or durationtools.Duration(1, 4)
target_duration = durationtools.Duration(target_duration)
assert 0 < target_duration
self._target_duration = target_duration
### SPECIAL METHODS ###
def __repr__(self):
r'''Gets interpreter representation of fixed-duration container.
Returns string.
'''
result = '{}({!r}, {})'
result = result.format(
type(self).__name__,
self.target_duration,
list(self[:]),
)
return result
### PRIVATE METHODS ###
def _check_duration(self):
from abjad.tools import indicatortools
preprolated_duration = self._contents_duration
if preprolated_duration < self.target_duration:
raise UnderfullContainerError
if self.target_duration < preprolated_duration:
raise OverfullContainerError
@property
def _lilypond_format(self):
self._check_duration()
return self._format_component()
### PUBLIC PROPERTIES ###
@property
def is_full(self):
r'''Is true when preprolated duration equals target duration. Otherwise
false.
Returns boolean.
'''
return self._preprolated_duration == self.target_duration
@property
def is_misfilled(self):
r'''Is true when preprolated duration does not equal target duration.
Otherwise false.
Returns boolean.
'''
return not self.is_full
@property
def is_overfull(self):
r'''Is true when preprolated duration is greater than target duration.
Otherwise false.
Returns boolean.
'''
return self.target_duration < self._preprolated_duration
@property
def is_underfull(self):
r'''Is true when preprolated duration is less than target duration.
Otherwise false.
Returns boolean.
'''
return self._preprolated_duration < self.target_duration
@property
def target_duration(self):
r'''Gets and sets target duration of fixed-duration container.
Returns duration.
'''
return self._target_duration
@target_duration.setter
def target_duration(self, target_duration):
target_duration = durationtools.Duration(target_duration)
assert 0 < target_duration
self._target_duration = target_duration | gpl-3.0 | -9,112,601,096,330,817,000 | 25.484375 | 79 | 0.605783 | false |
bnbowman/HlaTools | src/pbhla/references/fofn.py | 1 | 2835 | import os, re, logging
from pbcore.io.FastaIO import FastaReader
log = logging.getLogger()
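# A reference FOFN is plain text: lines beginning with '#' carry "key=value"
# metadata, every other line holds a "<fasta_path> <locus>" pair.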
def parse_reference_fofn( fofn_file ):
basename = os.path.basename( fofn_file )
log.info('Parsing the reference FOFN: "%s"' % basename)
sequences = _parse_reference_sequences( fofn_file )
metadata = _parse_reference_metadata( fofn_file )
loci = _parse_reference_loci( fofn_file )
log.info('Finished parsing the reference FOFN')
return sequences, metadata, loci
def parse_reference_dict( fofn_file ):
"""
Parse a reference fofn of locus-specific fasta files into a dictionary
"""
reference_dict = {}
with open( fofn_file, 'r') as handle:
for line in handle:
if line.startswith('#'):
continue
fasta_file, locus = line.strip().split()
if locus in reference_dict:
msg = 'Duplicate locus references found (Locus %s)' % locus
log.error( msg )
raise KeyError( msg )
reference_dict[locus] = fasta_file
return reference_dict
def _parse_reference_sequences( fofn_file ):
log.info('Parsing reference sequence data...')
records = []
with open( fofn_file, 'r') as handle:
for line in handle:
if line.startswith('#'):
continue
filename, locus = line.strip().split()
records += list( FastaReader( filename ) )
log.info("Found %s reference sequence records" % len(records))
return records
def _parse_reference_metadata( fofn_file ):
log.info('Parsing reference metadata...')
metadata = {}
with open( fofn_file, 'r') as handle:
for line in handle:
if not line.startswith('#'):
continue
key, value = line[1:].strip().split('=')
metadata[key] = value
log.info('Found %s pieces of metadata' % len(metadata))
return metadata
def _parse_reference_loci( fofn_file ):
log.info('Parsing reference loci...')
results = {}
loci = set()
with open(fofn_file, 'r') as handle:
for line in handle:
if line.startswith('#'):
continue
fasta_path, locus = line.strip().split()
loci.add( locus )
fasta_file = os.path.basename( fasta_path )
log.info('Reading "{0}" sequences from "{1}"'.format(locus, fasta_file))
for record in FastaReader( fasta_path ):
name = record.name.split()[0]
if not re.search('__', name):
name = name.split('_')[0]
results[name] = locus
log.info('Finished parsing data for {0} sequences from {1} loci'.format( len(results),
len(loci) ))
return results
| bsd-3-clause | -9,052,151,865,152,977,000 | 36.302632 | 91 | 0.564374 | false |
dcbaker/alot | alot/account.py | 1 | 7204 | # Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import abc
import glob
import logging
import mailbox
import os
from .helper import call_cmd_async
from .helper import split_commandstring
class SendingMailFailed(RuntimeError):
pass
class StoreMailError(Exception):
pass
class Account(object):
"""
Datastructure that represents an email account. It manages this account's
settings, can send and store mails to maildirs (drafts/send).
.. note::
This is an abstract class that leaves :meth:`send_mail` unspecified.
See :class:`SendmailAccount` for a subclass that uses a sendmail
command to send out mails.
"""
__metaclass__ = abc.ABCMeta
address = None
"""this accounts main email address"""
aliases = []
"""list of alternative addresses"""
alias_regexp = []
"""regex matching alternative addresses"""
realname = None
"""real name used to format from-headers"""
gpg_key = None
"""gpg fingerprint for this account's private key"""
signature = None
"""signature to append to outgoing mails"""
signature_filename = None
"""filename of signature file in attachment"""
signature_as_attachment = None
"""attach signature file instead of appending its content to body text"""
abook = None
"""addressbook (:class:`addressbook.AddressBook`)
managing this accounts contacts"""
def __init__(self, address=None, aliases=None, alias_regexp=None,
realname=None, gpg_key=None, signature=None,
signature_filename=None, signature_as_attachment=False,
sent_box=None, sent_tags=None, draft_box=None,
draft_tags=None, abook=None, sign_by_default=False,
encrypt_by_default=u"none",
**_):
sent_tags = sent_tags or []
if 'sent' not in sent_tags:
sent_tags.append('sent')
draft_tags = draft_tags or []
if 'draft' not in draft_tags:
draft_tags.append('draft')
self.address = address
self.aliases = aliases
self.alias_regexp = alias_regexp
self.realname = realname
self.gpg_key = gpg_key
self.signature = signature
self.signature_filename = signature_filename
self.signature_as_attachment = signature_as_attachment
self.sign_by_default = sign_by_default
self.sent_box = sent_box
self.sent_tags = sent_tags
self.draft_box = draft_box
self.draft_tags = draft_tags
self.abook = abook
# Handle encrypt_by_default in an backwards compatible way. The
# logging info call can later be upgraded to warning or error.
encrypt_by_default = encrypt_by_default.lower()
msg = "Deprecation warning: The format for the encrypt_by_default " \
"option changed. Please use 'none', 'all' or 'trusted'."
if encrypt_by_default in (u"true", u"yes", u"1"):
encrypt_by_default = u"all"
logging.info(msg)
elif encrypt_by_default in (u"false", u"no", u"0"):
encrypt_by_default = u"none"
logging.info(msg)
self.encrypt_by_default = encrypt_by_default
def get_addresses(self):
"""return all email addresses connected to this account, in order of
their importance"""
return [self.address] + self.aliases
@staticmethod
def store_mail(mbx, mail):
"""
stores given mail in mailbox. If mailbox is maildir, set the S-flag and
        return path to newly added mail. Otherwise this will return `None`.
:param mbx: mailbox to use
:type mbx: :class:`mailbox.Mailbox`
:param mail: the mail to store
:type mail: :class:`email.message.Message` or str
:returns: absolute path of mail-file for Maildir or None if mail was
successfully stored
:rtype: str or None
:raises: StoreMailError
"""
if not isinstance(mbx, mailbox.Mailbox):
logging.debug('Not a mailbox')
return False
mbx.lock()
if isinstance(mbx, mailbox.Maildir):
logging.debug('Maildir')
msg = mailbox.MaildirMessage(mail)
msg.set_flags('S')
else:
logging.debug('no Maildir')
msg = mailbox.Message(mail)
try:
message_id = mbx.add(msg)
mbx.flush()
mbx.unlock()
logging.debug('got mailbox msg id : %s', message_id)
except Exception as e:
raise StoreMailError(e)
path = None
# add new Maildir message to index and add tags
if isinstance(mbx, mailbox.Maildir):
# this is a dirty hack to get the path to the newly added file
# I wish the mailbox module were more helpful...
plist = glob.glob1(os.path.join(mbx._path, 'new'),
message_id + '*')
if plist:
path = os.path.join(mbx._path, 'new', plist[0])
logging.debug('path of saved msg: %s', path)
return path
def store_sent_mail(self, mail):
"""
stores mail (:class:`email.message.Message` or str) in send-store if
:attr:`sent_box` is set.
"""
if self.sent_box is not None:
return self.store_mail(self.sent_box, mail)
def store_draft_mail(self, mail):
"""
stores mail (:class:`email.message.Message` or str) as draft if
:attr:`draft_box` is set.
"""
if self.draft_box is not None:
return self.store_mail(self.draft_box, mail)
@abc.abstractmethod
def send_mail(self, mail):
"""
sends given mail
:param mail: the mail to send
:type mail: :class:`email.message.Message` or string
        :returns: a `Deferred` that errs back with a :class:`SendingMailFailed`,
            containing a reason string if an error occurred.
"""
pass
class SendmailAccount(Account):
""":class:`Account` that pipes a message to a `sendmail` shell command for
sending"""
def __init__(self, cmd, **kwargs):
"""
:param cmd: sendmail command to use for this account
:type cmd: str
"""
super(SendmailAccount, self).__init__(**kwargs)
self.cmd = cmd
def send_mail(self, mail):
cmdlist = split_commandstring(self.cmd)
def cb(out):
logging.info('sent mail successfully')
logging.info(out)
def errb(failure):
termobj = failure.value
errmsg = '%s failed with code %s:\n%s' % \
(self.cmd, termobj.exitCode, str(failure.value))
logging.error(errmsg)
logging.error(failure.getTraceback())
logging.error(failure.value.stderr)
raise SendingMailFailed(errmsg)
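        # Pipe the rendered mail into the configured sendmail command's stdin;
        # cb/errb report success or failure of the asynchronous call.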
d = call_cmd_async(cmdlist, stdin=mail)
d.addCallback(cb)
d.addErrback(errb)
return d
| gpl-3.0 | 6,802,126,089,319,658,000 | 33.634615 | 79 | 0.594531 | false |
otto-de/gatekeeper | tests/test_gates.py | 1 | 1185 | import unittest
from app import gates
from tests.helpers.database_helper import DatabaseHelper
test_rules = {
"live": {
"no_restriction": {
"hours_range": [0, 24],
"days_range": [0, 6]
}
},
"develop": {
"no_restriction": {
"hours_range": [0, 24],
"days_range": [0, 6]
}
}
}
class TestGates(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.database_helper = DatabaseHelper('test')
cls.database_helper.clear_database()
@classmethod
def tearDown(cls):
cls.database_helper.clear_database()
def test_are_gates_open_when_there_is_no_holiday_today(self):
self.database_helper.create_holiday(1, 'Holidays are always tomorrow', ['live'])
self.assertEqual(True, gates.are_gates_open(test_rules, 'live'))
def test_are_gates_closed_on_live_but_open_on_develop(self):
self.database_helper.create_holiday(0, 'Party time today', ['live'])
self.assertEqual(False, gates.are_gates_open(test_rules, 'live'))
self.assertEqual(True, gates.are_gates_open(test_rules, 'develop'))
| apache-2.0 | -8,818,432,962,294,664,000 | 28.625 | 88 | 0.61097 | false |
thonkify/thonkify | src/lib/telegram/inlinequeryresultgame.py | 1 | 1573 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram
InlineQueryResultGame"""
from telegram import InlineQueryResult, InlineKeyboardMarkup
class InlineQueryResultGame(InlineQueryResult):
def __init__(self, id, game_short_name, reply_markup=None, **kwargs):
# Required
super(InlineQueryResultGame, self).__init__('game', id)
self.id = id
self.game_short_name = game_short_name
if reply_markup:
self.reply_markup = reply_markup
@staticmethod
def de_json(data, bot):
data = super(InlineQueryResultGame, InlineQueryResultGame).de_json(data, bot)
data['reply_markup'] = InlineKeyboardMarkup.de_json(data.get('reply_markup'), bot)
return InlineQueryResultGame(**data)
| mit | 5,235,290,454,981,283,000 | 37.365854 | 90 | 0.72473 | false |
wlan0/cattle | tests/integration/cattletest/core/test_api.py | 2 | 11161 | from cattle import ApiError
from common_fixtures import * # NOQA
def test_agent_unique(super_client):
agents = super_client.list_agent(uri='sim://unique')
if len(agents) == 0:
agent = super_client.create_agent(uri='sim://unique')
agent = super_client.wait_success(agent)
assert agent.state == 'active'
agent.deactivate()
try:
super_client.create_agent(uri='sim://unique')
assert False
except ApiError, e:
assert e.error.code == 'NotUnique'
pass
def test_list_sort(super_client, context):
name = random_str()
containers = []
for i in range(2):
c = context.create_container_no_success(name=name, startOnCreate=False)
containers.append(c)
r = super_client.list_container(name=name)
for i in range(len(r)):
assert containers[i].id == r[i].id
r = super_client.list_container(name=name, sort='created', order='desc')
containers.reverse()
for i in range(len(r)):
assert containers[i].id == r[i].id
def test_pagination(context):
client = context.client
name = random_str()
containers = []
for i in range(4):
c = client.create_container(imageUuid=context.image_uuid, name=name)
containers.append(c)
for c in containers:
client.wait_success(c)
r = client.list_container(name=name)
assert len(r) == 4
try:
assert r.pagination.next is None
except AttributeError:
pass
collected = {}
r = client.list_container(name=name, limit=2)
assert len(r) == 2
assert r.pagination.next is not None
for i in r:
collected[i.id] = True
r = r.next()
assert len(r) == 2
try:
assert r.pagination.next is None
except AttributeError:
pass
for i in r:
collected[i.id] = True
assert len(collected) == 4
def test_pagination_include(super_client, new_context):
context = new_context
client = new_context.client
host = context.host
name = random_str()
container_ids = []
containers = []
for i in range(5):
c = client.create_container(imageUuid=context.image_uuid,
name=name,
requestedHostId=host.id)
c = super_client.reload(c)
containers.append(c)
container_ids.append(c.id)
for c in containers:
client.wait_success(c)
assert len(containers[0].instanceHostMaps()) == 1
assert host.id == containers[0].instanceHostMaps()[0].host().id
r = super_client.list_container(name=name)
assert len(r) == 5
for c in r:
assert len(c.instanceHostMaps()) == 1
assert c.instanceHostMaps()[0].hostId == host.id
collected = {}
r = super_client.list_container(name=name, include='instanceHostMaps',
limit=2)
assert len(r) == 2
for c in r:
collected[c.id] = True
assert len(c.instanceHostMaps) == 1
assert c.instanceHostMaps[0].hostId == host.id
r = r.next()
assert len(r) == 2
for c in r:
collected[c.id] = True
assert len(c.instanceHostMaps) == 1
assert c.instanceHostMaps[0].hostId == host.id
r = r.next()
assert len(r) == 1
for c in r:
collected[c.id] = True
assert len(c.instanceHostMaps) == 1
assert c.instanceHostMaps[0].hostId == host.id
assert not r.pagination.partial
maps = []
for id in container_ids:
maps.extend(super_client.list_instanceHostMap(hostId=host.id,
instanceId=id))
assert len(maps) == 5
maps_from_include = []
r = super_client.list_host(include='instanceHostMaps', limit=2,
accountId=host.accountId)
while True:
for h in r:
if h.id == host.id:
assert len(h.instanceHostMaps) <= 2
for m in h.instanceHostMaps:
if m.instanceId in container_ids and \
m.instanceId not in maps_from_include:
maps_from_include.append(m.instanceId)
for c in containers:
if c.id == m.instanceId:
client.wait_success(c.stop())
try:
r = r.next()
except AttributeError:
break
assert len(maps) == len(maps_from_include)
del maps_from_include[:]
r = super_client.list_host(include='instances', limit=2,
accountId=host.accountId)
while True:
for h in r:
if h.id == host.id:
for c in h.instances:
if c.id in container_ids and \
c.id not in maps_from_include:
maps_from_include.append(c.id)
client.wait_success(c.start())
try:
r = r.next()
except AttributeError:
break
assert len(maps) == len(maps_from_include)
def test_include_left_join(super_client, context):
container = context.create_container_no_success(startOnCreate=False)
container = context.wait_for_state(container, 'stopped')
c = super_client.by_id('container', container.id,
include='instanceHostMaps')
assert container.id == c.id
def test_include_left_join_sort(super_client, context):
client = context.client
name = random_str()
containers = []
for i in range(2):
c = client.create_container(imageUuid=context.image_uuid, name=name)
containers.append(c)
for c in containers:
client.wait_success(c)
r = super_client.list_container(name=name, include='instanceHostMaps',
sort='created', order='asc')
for i in range(len(r)):
assert containers[i].id == r[i].id
r = super_client.list_container(name=name, include='instanceHostMaps',
sort='created', order='desc')
containers.reverse()
for i in range(len(r)):
assert containers[i].id == r[i].id
def test_include(super_client, context):
container = context.create_container(name='include_test')
container = super_client.reload(container)
for link_name in ['instanceHostMaps', 'instancehostmaps']:
found = False
for c in super_client.list_container(name_like='include_test%'):
if c.id == container.id:
found = True
assert len(c.instanceHostMaps()) == 1
assert callable(c.instanceHostMaps)
assert found
found = False
for c in super_client.list_container(include=link_name,
name_like='include_test%'):
if c.id == container.id:
found = True
assert len(c.instanceHostMaps) == 1
assert found
c = super_client.by_id('container', container.id)
assert callable(c.instanceHostMaps)
c = super_client.by_id('container', container.id, include=link_name)
assert len(c.instanceHostMaps) == 1
def test_limit(super_client):
result = super_client.list_container()
assert result.pagination.limit == 100
result = super_client.list_container(limit=105)
assert result.pagination.limit == 105
result = super_client.list_container(limit=10005)
assert result.pagination.limit == 3000
def test_schema_boolean_default(client):
con_schema = client.schema.types['container']
assert isinstance(con_schema.resourceFields.startOnCreate.default, bool)
def test_schema_self_link(client):
con_schema = client.schema.types['container']
assert con_schema.links.self is not None
assert con_schema.links.self.startswith("http")
def test_child_map_include(super_client, context):
container = context.create_container()
cs = super_client.list_container(uuid=container.uuid, include='hosts')
assert cs[0].hosts[0].uuid is not None
assert len(cs[0].hosts) == 1
hs = super_client.list_host(uuid=cs[0].hosts[0].uuid,
include='instances')
found = False
for i in hs[0].instances:
if i.uuid == cs[0].uuid:
found = True
assert found
def test_child_map(super_client, context):
container = context.create_container()
hosts = super_client.reload(container).hosts()
assert len(hosts) == 1
assert hosts[0].type == 'host'
def test_fields_on_include(super_client, context):
c = context.create_container()
host = super_client.by_id_host(context.host.id,
include='instances')
assert host is not None
found = False
for instance in host.instances:
if instance.id == c.id:
assert instance.transitioning == 'no'
assert 'stop' in instance
assert callable(instance.stop)
assert len(instance.links) > 1
found = True
break
assert found
def test_state_enum(super_client):
container_schema = super_client.schema.types['container']
states = set([
'creating',
'migrating',
'purged',
'purging',
'removed',
'removing',
'requested',
'restarting',
'restoring',
'running',
'starting',
'stopped',
'stopping',
'updating-running',
'updating-stopped',
'error',
'erroring'
])
assert container_schema.resourceFields['state'].type == 'enum'
assert states == set(container_schema.resourceFields['state'].options)
def test_actions_based_on_state(context):
c = context.create_container()
assert set(c.actions.keys()) == set(['migrate', 'restart', 'stop',
'update', 'execute', 'logs',
'proxy'])
def test_include_user_not_auth_map(client):
client.list_host(include='instances')
def test_map_user_not_auth_map(context):
c = context.create_container()
assert len(c.hosts()) == 1
def test_role_option(admin_user_client, client, random_str, context):
c = admin_user_client.create_api_key(name=random_str,
accountId=context.account.id)
c = admin_user_client.wait_success(c)
assert c.state == 'active'
creds = admin_user_client.list_credential(name=random_str)
assert len(creds) == 1
creds = admin_user_client.list_credential(name=random_str,
_role='user')
assert len(creds) == 0
creds = client.list_credential(name=random_str, _role='superadmin')
assert len(creds) == 0
schemas = [x for x in admin_user_client.list_schema(_role='project')
if x.id == 'externalHandler']
assert len(schemas) == 0
def test_query_length(admin_user_client):
big = 'a' * 8192
admin_user_client.list_account(name=big)
bigger = 'a' * (16384 - 512)
admin_user_client.list_account(name=bigger)
| apache-2.0 | 1,746,889,417,423,122,700 | 28.140992 | 79 | 0.578891 | false |
devttys0/binwalk | src/binwalk/plugins/jffs2valid.py | 1 | 1762 | import struct
import binascii
import binwalk.core.plugin
class JFFS2ValidPlugin(binwalk.core.plugin.Plugin):
'''
Helps validate JFFS2 signature results.
The JFFS2 signature rules catch obvious cases, but inadvertently
mark some valid JFFS2 nodes as invalid due to padding (0xFF's or
0x00's) in between nodes.
'''
MODULES = ['Signature']
def _check_crc(self, node_header):
# struct and binascii want a bytes object in Python3
node_header = binwalk.core.compat.str2bytes(node_header)
# Get the header's reported CRC value
if node_header[0:2] == b"\x19\x85":
header_crc = struct.unpack(">I", node_header[8:12])[0]
else:
header_crc = struct.unpack("<I", node_header[8:12])[0]
# Calculate the actual CRC
calculated_header_crc = (binascii.crc32(node_header[0:8], -1) ^ -1) & 0xffffffff
# Make sure they match
return (header_crc == calculated_header_crc)
def scan(self, result):
if result.file and result.description.lower().startswith('jffs2 filesystem'):
# Seek to and read the suspected JFFS2 node header
fd = self.module.config.open_file(result.file.path, offset=result.offset)
# JFFS2 headers are only 12 bytes in size, but reading larger amounts of
# data from disk speeds up repeated disk access and decreases performance
# hits (disk caching?).
#
# TODO: Should this plugin validate the *entire* JFFS2 file system, rather
# than letting the signature module find every single JFFS2 node?
node_header = fd.read(1024)
fd.close()
result.valid = self._check_crc(node_header[0:12])
| mit | -9,069,981,298,579,970,000 | 36.489362 | 88 | 0.635074 | false |
phenoxim/nova | nova/tests/unit/scheduler/fakes.py | 1 | 7133 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Scheduler tests.
"""
import datetime
from nova import objects
from nova.scheduler import driver
from nova.scheduler import host_manager
from nova.tests import uuidsentinel
NUMA_TOPOLOGY = objects.NUMATopology(cells=[
objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[
objects.NUMAPagesTopology(size_kb=16, total=387184, used=0),
objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)],
pinned_cpus=set([]),
siblings=[set([1]), set([2])]),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[
objects.NUMAPagesTopology(size_kb=4, total=1548736, used=0),
objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)],
pinned_cpus=set([]),
siblings=[set([3]), set([4])])])
NUMA_TOPOLOGIES_W_HT = [
objects.NUMATopology(cells=[
objects.NUMACell(
id=0, cpuset=set([1, 2, 5, 6]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[set([1, 5]), set([2, 6])], pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4, 7, 8]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[set([3, 4]), set([7, 8])], pinned_cpus=set([]))
]),
objects.NUMATopology(cells=[
objects.NUMACell(
id=0, cpuset=set([]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([1, 2, 5, 6]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[set([1, 5]), set([2, 6])], pinned_cpus=set([])),
objects.NUMACell(
id=2, cpuset=set([3, 4, 7, 8]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[set([3, 4]), set([7, 8])], pinned_cpus=set([])),
]),
]
COMPUTE_NODES = [
objects.ComputeNode(
uuid=uuidsentinel.cn1,
id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=None, free_ram_mb=512, vcpus_used=1,
free_disk_gb=512, local_gb_used=0,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
objects.ComputeNode(
uuid=uuidsentinel.cn2,
id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_gb=1024, local_gb_used=0,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
objects.ComputeNode(
uuid=uuidsentinel.cn3,
id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
free_disk_gb=3072, local_gb_used=0,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host='host3', hypervisor_hostname='node3', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=NUMA_TOPOLOGY._to_json(),
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
objects.ComputeNode(
uuid=uuidsentinel.cn4,
id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_gb=8888, local_gb_used=0,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host='host4', hypervisor_hostname='node4', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
# Broken entry
objects.ComputeNode(
uuid=uuidsentinel.cn5,
id=5, local_gb=1024, memory_mb=1024, vcpus=1,
host='fake', hypervisor_hostname='fake-hyp'),
]
ALLOC_REQS = [
{
'allocations': {
cn.uuid: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 512,
'DISK_GB': 512,
},
}
}
} for cn in COMPUTE_NODES
]
RESOURCE_PROVIDERS = [
dict(
uuid=uuidsentinel.rp1,
name='host1',
generation=1),
dict(
uuid=uuidsentinel.rp2,
name='host2',
generation=1),
dict(
uuid=uuidsentinel.rp3,
name='host3',
generation=1),
dict(
uuid=uuidsentinel.rp4,
name='host4',
generation=1),
]
SERVICES = [
objects.Service(host='host1', disabled=False),
objects.Service(host='host2', disabled=True),
objects.Service(host='host3', disabled=False),
objects.Service(host='host4', disabled=False),
]
def get_service_by_host(host):
services = [service for service in SERVICES if service.host == host]
return services[0]
class FakeHostState(host_manager.HostState):
def __init__(self, host, node, attribute_dict, instances=None):
super(FakeHostState, self).__init__(host, node, None)
if instances:
self.instances = {inst.uuid: inst for inst in instances}
else:
self.instances = {}
for (key, val) in attribute_dict.items():
setattr(self, key, val)
class FakeScheduler(driver.Scheduler):
def select_destinations(self, context, spec_obj, instance_uuids):
return []
| apache-2.0 | 8,389,918,362,189,295,000 | 37.556757 | 78 | 0.582644 | false |
dekatzenel/team-k | mds/urls.py | 1 | 4352 | """The Django url to handler mappings for mDS.
:Authors: Sana dev team
:Version: 2.0
"""
from django.conf import settings
from django.conf.urls.defaults import patterns, url, include
from django.views.generic.simple import redirect_to
from django.contrib import admin
#from piston.authentication import HttpBasicAuthentication
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', 'core.views.home', name="home"),
url(r'^login/', 'django.contrib.auth.views.login'),
url(r'^core/', include('core.urls', namespace='core')),
url(r'^tasks/', include('tasks.urls', namespace='tasks')),
url(r'^clients/', include('clients.urls', namespace='clients')),
# ADMIN
(r'^admin/', include(admin.site.urls)),
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
)
"""The mappings Django uses to send requests to the appropriate handlers."""
if settings.DEBUG:
urlpatterns += patterns(
'django.contrib.staticfiles.views',
url(r'^static/(?P<path>.*)$', 'serve'),
)
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT }),
)
if 'v1' in settings.APICOMPAT_INCLUDE:
from piston.resource import Resource
from mrs.handlers import *
v1auth_resource = Resource(AuthHandler)
v1notification_resource = Resource(NotificationHandler)
v1smtp_resource = Resource(SMTPHandler)
v1patient_resource = Resource(PatientHandler)
v1savedprocedure_resource = Resource(SavedProcedureHandler)
v1event_resource = Resource(EventHandler)
v1requestlog_resource = Resource(EventHandler)
v1binary_resource = Resource(BinaryHandler)
v1binarypacket_resource = Resource(BinaryPacketHandler)
v1base64packet_resource = Resource(Base64PacketHandler)
v1requestlog_resource = Resource(RequestLogHandler)
v1patterns = patterns(
'',
url(r'^notifications/$',
v1notification_resource,
name="sana-list-notifications"),
url(r'^notifications/submit/$',
v1notification_resource,
name="sana-api-notification-submit"),
url(r'^notifications/submit/email/$',
v1smtp_resource,
name="sana-api-email-notification-submit"),
url(r'^json/patient/list/$',
v1patient_resource,
name="sana-json-patient-list"),
url(r'^json/patient/(?P<id>[0-9-]+)/$',
v1patient_resource,
name="sana-json-patient-get"),
url(r'^json/validate/credentials/$',
v1auth_resource,
name = "sana-json-validate-credentials"),
url(r'^procedure/submit/$',
v1savedprocedure_resource,
name="sana-html-procedure-submit"),
url(r'^json/procedure/submit/$',
v1savedprocedure_resource,
name="sana-json-procedure-submit"),
url(r'^json/binary/submit/$',
v1binary_resource,
name="sana-json-binary-submit"),
url(r'^json/binarychunk/submit/$',
v1binarypacket_resource,
name="sana-json-binarychunk-submit"),
url(r'^json/textchunk/submit/$',
v1base64packet_resource,
name="sana-json-binarychunk-hack-submit"),
url(r'^json/eventlog/submit/$',
v1event_resource,
name="sana-json-eventlog-submit"),
# LOGGING
url(r'^log-detail/$',
v1requestlog_resource,
#'sana.api.v1.util.log_json_detail',
name="log-json-detail-noarg"),
url(r'^log-detail/(?P<uuid>\d+)$',
v1requestlog_resource,
name="log-json-detail"),
url(r'^log/$',
v1requestlog_resource,
name="log-view"),
url(r'^log/web/$',
v1requestlog_resource,
name="log-web-view"),
url(r'^log/list/$',
v1requestlog_resource,
name="requestlog-list"),
url(r'^log/(?P<uuid>[^/]+)/$',
v1requestlog_resource,
name="requestlog"),
)
# Add v1 compat urls
urlpatterns += v1patterns
| bsd-3-clause | -9,044,449,508,689,583,000 | 30.773723 | 112 | 0.580423 | false |
UUDigitalHumanitieslab/timealign | annotations/management/commands/import_tenses.py | 1 | 4002 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from annotations.models import Language, Tense, Annotation, Fragment, LabelKey, Label
from selections.models import Selection
class Command(BaseCommand):
help = 'Imports Tenses/Labels for Annotations, Selections, and Fragments'
def add_arguments(self, parser):
parser.add_argument('language', type=str)
parser.add_argument('filenames', nargs='+', type=str)
parser.add_argument('--model', action='store', dest='model', default='annotation')
def handle(self, *args, **options):
try:
language = Language.objects.get(iso=options['language'])
except Language.DoesNotExist:
raise CommandError('Language {} does not exist'.format(options['language']))
for filename in options['filenames']:
with open(filename, 'r') as csvfile:
try:
process_file(csvfile, language, options['model'])
self.stdout.write('Successfully imported labels')
except ValueError as e:
                    raise CommandError(str(e))
def process_file(f, language, model='annotation'):
f = iter(f)
header = next(f)
if isinstance(header, bytes):
header = header.decode()
header = header.strip().split('\t')
columns = []
for h in header[1:]:
if isinstance(h, bytes):
h = h.decode()
if h.lower() in ['tense', 'comments']:
            columns.append(h.lower())
else:
try:
key = LabelKey.objects.get(title__iexact=h)
columns.append(key)
except LabelKey.DoesNotExist:
raise ValueError('Unknown label "{}"'.format(h))
for row in f:
        if isinstance(row, bytes):
            row = row.decode()
        row = row.strip()
if row:
encoded = row.split('\t')
if model == 'annotation':
obj = get_annotation(encoded)
elif model == 'selection':
obj = get_selection(encoded)
elif model == 'fragment':
obj = get_fragment(encoded)
else:
raise ValueError('Unknown model {}'.format(model))
update_fields(obj, language, encoded, columns)
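# Illustrative input sketch (not part of the original file; the values are made
# up). The first column is always the primary key of the object being updated;
# the remaining headers are "tense", "comments", or the title of a LabelKey:
#
#   pk<TAB>tense<TAB>comments
#   12<TAB>present perfect<TAB>checked against the source text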
def update_fields(obj, language, row, columns):
for idx, column in enumerate(columns):
if idx + 1 >= len(row):
continue
cell = row[idx + 1]
if column == 'tense':
try:
obj.tense = Tense.objects.get(title__iexact=cell, language=language)
except Tense.DoesNotExist:
raise ValueError('Tense with title "{}" not found.'.format(row[1]))
elif column == 'comments':
if isinstance(obj, (Annotation, Selection)):
obj.comments = cell
else:
raise ValueError('Cannot add comments to Fragment')
elif isinstance(column, LabelKey):
if column.language_specific:
label, created = Label.objects.get_or_create(title=cell, key=column, language=language)
else:
label, created = Label.objects.get_or_create(title=cell, key=column)
if created:
label.save()
for existing in obj.labels.filter(key=column):
obj.labels.remove(existing)
obj.labels.add(label)
obj.save()
def get_annotation(row):
try:
return Annotation.objects.get(pk=row[0])
except Annotation.DoesNotExist:
raise ValueError('Annotation with pk {} not found.'.format(row[0]))
def get_selection(row):
try:
return Selection.objects.get(pk=row[0])
except Selection.DoesNotExist:
raise ValueError('Selection with pk {} not found.'.format(row[0]))
def get_fragment(row):
try:
return Fragment.objects.get(pk=row[0])
except Fragment.DoesNotExist:
raise ValueError('Fragment with pk {} not found.'.format(row[0]))
| mit | -8,120,638,896,575,483,000 | 32.35 | 103 | 0.57921 | false |
vslavik/bakefile | src/bkl/interpreter/__init__.py | 1 | 9662 | #
# This file is part of Bakefile (http://bakefile.org)
#
# Copyright (C) 2008-2013 Vaclav Slavik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module contains the very core of Bakefile -- the interpreter,
:class:`bkl.interpreter.Interpreter`, and its supporting classes.
"""
import logging
import bkl.parser
import bkl.model
import bkl.api
import bkl.expr
import passes
from builder import Builder
from bkl.error import Error, warning
from bkl.parser import parse_file
logger = logging.getLogger("bkl.interpreter")
class Interpreter(object):
"""
The interpreter is responsible for doing everything necessary to
"translate" input ``.bkl`` files into generated native makefiles. This
includes building a project model from the input, checking it for
correctness, optimizing it and creating outputs for all enabled toolsets.
:class:`Interpreter` provides both high-level interface for single-call
usage (see :meth:`process`) and other methods with finer granularity that
allows you to inspect individual steps (most useful for the test suite).
.. attribute:: model
Model of the project, as :class:`bkl.model.Project`. It's state always
reflects current state of processing.
.. attribute:: toolsets_to_use
Set of toolsets to generate for. This list may contain only a subset of
toolsets the bakefile is written for and may even contain toolsets not
specified in the bakefile.
If :const:`None` (the default), then the toolsets listed in the bakefile
are used.
"""
def __init__(self):
self.model = bkl.model.Project()
self.toolsets_to_use = None
def limit_toolsets(self, toolsets):
"""Sets :attr:`toolsets_to_use`."""
self.toolsets_to_use = set(toolsets)
def process(self, ast):
"""
Interprets input file and generates the outputs.
:param ast: AST of the input file, as returned by
:func:`bkl.parser.parse_file`.
Processing is done in several phases:
1. Basic model is built (see :class:`bkl.interpreter.builder.Builder`).
No optimizations or checks are performed at this point.
2. Several generic optimization and checking passes are run on the
model. Among other things, types correctness and other constraints
are checked, variables are substituted and evaluated.
3. The model is split into several copies, one per output toolset.
4. Further optimization passes are done.
5. Output files are generated.
Step 1 is done by :meth:`add_module`. Steps 2-4 are done by
:meth:`finalize` and step 5 is implemented in :meth:`generate`.
"""
self.add_module(ast, self.model)
self.finalize()
self.generate()
def process_file(self, filename):
"""Like :meth:`process()`, but takes filename as its argument."""
self.process(parse_file(filename))
def add_module(self, ast, parent):
"""
Adds parsed AST to the model, without doing any optimizations. May be
called more than once, with different parsed files.
:param ast: AST of the input file, as returned by
:func:`bkl.parser.parse_file`.
"""
logger.info("processing %s", ast.filename)
submodules = []
b = Builder(on_submodule=lambda fn, pos: submodules.append((fn,pos)))
module = b.create_model(ast, parent)
while submodules:
sub_filename, sub_pos = submodules[0]
submodules.pop(0)
try:
sub_ast = parse_file(sub_filename)
except IOError as e:
if e.filename:
msg = "%s: %s" % (e.strerror, e.filename)
else:
msg = e.strerror
raise Error(msg, pos=sub_pos)
self.add_module(sub_ast, module)
def _call_custom_steps(self, model, func):
for step in bkl.api.CustomStep.all():
logger.debug("invoking custom step %s.%s()", step.name, func)
getattr(step, func)(model)
def finalize(self):
"""
Finalizes the model, i.e. checks it for validity, optimizes, creates
per-toolset models etc.
"""
logger.debug("finalizing the model")
# call any custom steps first:
self._call_custom_steps(self.model, "finalize")
# then apply standard processing:
passes.detect_potential_problems(self.model)
passes.normalize_and_validate_bool_subexpressions(self.model)
passes.normalize_vars(self.model)
passes.validate_vars(self.model)
passes.normalize_paths_in_model(self.model, toolset=None)
passes.simplify_exprs(self.model)
def finalize_for_toolset(self, toolset_model, toolset):
"""
Finalizes after "toolset" variable was set.
"""
passes.remove_disabled_model_parts(toolset_model, toolset)
# TODO: do this in finalize() instead
passes.make_variables_for_missing_props(toolset_model, toolset)
passes.eliminate_superfluous_conditionals(toolset_model)
# This is done second time here (in addition to finalize()) to deal
# with paths added by make_variables_for_missing_props() and paths with
# @builddir (which is toolset specific and couldn't be resolved
# earlier). Ideally we wouldn't do it, but hopefully it's not all that
# inefficient, as no real work is done for paths that are already
# normalized:
passes.normalize_paths_in_model(toolset_model, toolset)
def make_toolset_specific_model(self, toolset, skip_making_copy=False):
"""
Returns toolset-specific model, i.e. one that works only with
*toolset*, has the ``toolset`` property set to it. The caller
still needs to call finalize_for_toolset() on it.
"""
if skip_making_copy:
model = self.model
else:
model = self.model.clone()
# don't use Variable.from_property(), because it's read-only
model.add_variable(bkl.model.Variable.from_property(
model.get_prop("toolset"),
bkl.expr.LiteralExpr(toolset)))
return model
def generate(self):
"""
Generates output files.
"""
# collect all requested toolsets:
toolsets = set()
for module in self.model.modules:
module_toolsets = module.get_variable("toolsets")
if module_toolsets:
toolsets.update(module_toolsets.value.as_py())
if self.toolsets_to_use:
for t in self.toolsets_to_use:
if t not in toolsets:
try:
bkl.api.Toolset.get(t)
except KeyError:
raise Error("unknown toolset \"%s\" given on command line" % t)
warning("toolset \"%s\" is not supported by the project, there may be issues", t)
# Add the forced toolset to all submodules:
for module in self.model.modules:
module_toolsets = module.get_variable("toolsets")
if module_toolsets:
module_toolsets.value.items.append(bkl.expr.LiteralExpr(t))
toolsets = self.toolsets_to_use
toolsets = list(toolsets)
logger.debug("toolsets to generate for: %s", toolsets)
if not toolsets:
raise Error("nothing to generate, \"toolsets\" property is empty")
# call any custom steps first:
self._call_custom_steps(self.model, "generate")
# and generate the outputs (notice that we can avoid making a
# (expensive!) deepcopy of the model for one of the toolsets and can
# reuse the current model):
for toolset in toolsets[:-1]:
self.generate_for_toolset(toolset)
self.generate_for_toolset(toolsets[-1], skip_making_copy=True)
def generate_for_toolset(self, toolset, skip_making_copy=False):
"""
Generates output for given *toolset*.
"""
logger.debug("****** preparing model for toolset %s ******", toolset)
model = self.make_toolset_specific_model(toolset, skip_making_copy)
self.finalize_for_toolset(model, toolset)
logger.debug("****** generating for toolset %s ********", toolset)
bkl.api.Toolset.get(toolset).generate(model)
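# A minimal driver sketch (not part of the original module; the input file name
# and the toolset name are assumptions):
#
#   from bkl.interpreter import Interpreter
#   intp = Interpreter()
#   intp.limit_toolsets(["gnu"])        # optional: restrict generated toolsets
#   intp.process_file("Bakefile.bkl")   # parse, finalize and generate outputs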
| mit | 574,072,912,695,532,500 | 36.449612 | 101 | 0.630718 | false |
GoogleCloudPlatform/training-data-analyst | blogs/babyweight/babyweight/trainer/model.py | 1 | 9383 | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import shutil
import numpy as np
import tensorflow as tf
import hypertune
# Determine CSV and label columns
CSV_COLUMNS = ["weight_pounds",
"is_male",
"mother_age",
"plurality",
"gestation_weeks"]
LABEL_COLUMN = "weight_pounds"
# Set default values for each CSV column.
# Treat is_male and plurality as strings.
DEFAULTS = [[0.0], ["null"], [0.0], ["null"], [0.0]]
def features_and_labels(row_data):
"""Splits features and labels from feature dictionary.
Args:
row_data: Dictionary of CSV column names and tensor values.
Returns:
Dictionary of feature tensors and label tensor.
"""
label = row_data.pop(LABEL_COLUMN)
return row_data, label # features, label
def load_dataset(pattern, batch_size=1, mode='eval'):
"""Loads dataset using the tf.data API from CSV files.
Args:
pattern: str, file pattern to glob into list of files.
batch_size: int, the number of examples per batch.
mode: 'train' | 'eval' to determine if training or evaluating.
Returns:
`Dataset` object.
"""
print("mode = {}".format(mode))
# Make a CSV dataset
dataset = tf.data.experimental.make_csv_dataset(
file_pattern=pattern,
batch_size=batch_size,
column_names=CSV_COLUMNS,
column_defaults=DEFAULTS)
# Map dataset to features and label
dataset = dataset.map(map_func=features_and_labels) # features, label
# Shuffle and repeat for training
if mode == 'train':
dataset = dataset.shuffle(buffer_size=1000).repeat()
    # Prefetch to overlap the input pipeline with training. Note that 1 is not
    # AUTOTUNE; pass tf.data.experimental.AUTOTUNE to let TensorFlow tune the size.
    dataset = dataset.prefetch(buffer_size=1)
return dataset
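# Illustrative usage sketch (not part of the original file; the GCS path is an
# assumption):
#
#   trainds = load_dataset("gs://my-bucket/babyweight/train*.csv",
#                          batch_size=32, mode='train')
#   for features, label in trainds.take(1):
#       print(features["mother_age"].shape, label.shape)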
def create_input_layers():
"""Creates dictionary of input layers for each feature.
Returns:
Dictionary of `tf.Keras.layers.Input` layers for each feature.
"""
deep_inputs = {
colname: tf.keras.layers.Input(
name=colname, shape=(), dtype="float32")
for colname in ["mother_age", "gestation_weeks"]
}
wide_inputs = {
colname: tf.keras.layers.Input(
name=colname, shape=(), dtype="string")
for colname in ["is_male", "plurality"]
}
inputs = {**wide_inputs, **deep_inputs}
return inputs
def categorical_fc(name, values):
"""Helper function to wrap categorical feature by indicator column.
Args:
name: str, name of feature.
values: list, list of strings of categorical values.
Returns:
Categorical and indicator column of categorical feature.
"""
cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
key=name, vocabulary_list=values)
ind_column = tf.feature_column.indicator_column(
categorical_column=cat_column)
return cat_column, ind_column
def create_feature_columns(nembeds):
"""Creates wide and deep dictionaries of feature columns from inputs.
Args:
nembeds: int, number of dimensions to embed categorical column down to.
Returns:
Wide and deep dictionaries of feature columns.
"""
deep_fc = {
colname: tf.feature_column.numeric_column(key=colname)
for colname in ["mother_age", "gestation_weeks"]
}
wide_fc = {}
is_male, wide_fc["is_male"] = categorical_fc(
"is_male", ["True", "False", "Unknown"])
plurality, wide_fc["plurality"] = categorical_fc(
"plurality", ["Single(1)", "Twins(2)", "Triplets(3)",
"Quadruplets(4)", "Quintuplets(5)", "Multiple(2+)"])
# Bucketize the float fields. This makes them wide
age_buckets = tf.feature_column.bucketized_column(
source_column=deep_fc["mother_age"],
boundaries=np.arange(15, 45, 1).tolist())
wide_fc["age_buckets"] = tf.feature_column.indicator_column(
categorical_column=age_buckets)
gestation_buckets = tf.feature_column.bucketized_column(
source_column=deep_fc["gestation_weeks"],
boundaries=np.arange(17, 47, 1).tolist())
wide_fc["gestation_buckets"] = tf.feature_column.indicator_column(
categorical_column=gestation_buckets)
# Cross all the wide columns, have to do the crossing before we one-hot
crossed = tf.feature_column.crossed_column(
keys=[age_buckets, gestation_buckets],
hash_bucket_size=1000)
deep_fc["crossed_embeds"] = tf.feature_column.embedding_column(
categorical_column=crossed, dimension=nembeds)
return wide_fc, deep_fc
def get_model_outputs(wide_inputs, deep_inputs, dnn_hidden_units):
"""Creates model architecture and returns outputs.
Args:
wide_inputs: Dense tensor used as inputs to wide side of model.
deep_inputs: Dense tensor used as inputs to deep side of model.
dnn_hidden_units: List of integers where length is number of hidden
layers and ith element is the number of neurons at ith layer.
Returns:
Dense tensor output from the model.
"""
# Hidden layers for the deep side
layers = [int(x) for x in dnn_hidden_units]
deep = deep_inputs
for layerno, numnodes in enumerate(layers):
deep = tf.keras.layers.Dense(
units=numnodes,
activation="relu",
name="dnn_{}".format(layerno+1))(deep)
deep_out = deep
# Linear model for the wide side
wide_out = tf.keras.layers.Dense(
units=10, activation="relu", name="linear")(wide_inputs)
# Concatenate the two sides
both = tf.keras.layers.concatenate(
inputs=[deep_out, wide_out], name="both")
# Final output is a linear activation because this is regression
output = tf.keras.layers.Dense(
units=1, activation="linear", name="weight")(both)
return output
def rmse(y_true, y_pred):
"""Calculates RMSE evaluation metric.
Args:
y_true: tensor, true labels.
y_pred: tensor, predicted labels.
Returns:
Tensor with value of RMSE between true and predicted labels.
"""
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_wide_deep_model(dnn_hidden_units=[64, 32], nembeds=3):
"""Builds wide and deep model using Keras Functional API.
Returns:
`tf.keras.models.Model` object.
"""
# Create input layers
inputs = create_input_layers()
# Create feature columns for both wide and deep
wide_fc, deep_fc = create_feature_columns(nembeds)
# The constructor for DenseFeatures takes a list of numeric columns
# The Functional API in Keras requires: LayerConstructor()(inputs)
wide_inputs = tf.keras.layers.DenseFeatures(
feature_columns=wide_fc.values(), name="wide_inputs")(inputs)
deep_inputs = tf.keras.layers.DenseFeatures(
feature_columns=deep_fc.values(), name="deep_inputs")(inputs)
# Get output of model given inputs
output = get_model_outputs(wide_inputs, deep_inputs, dnn_hidden_units)
# Build model and compile it all together
model = tf.keras.models.Model(inputs=inputs, outputs=output)
model.compile(optimizer="adam", loss="mse", metrics=[rmse, "mse"])
return model
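# Illustrative sketch (not part of the original file): building the model on its
# own to inspect the architecture; the arguments shown are simply the defaults.
#
#   model = build_wide_deep_model(dnn_hidden_units=[64, 32], nembeds=3)
#   model.summary()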
def train_and_evaluate(args):
model = build_wide_deep_model(args["nnsize"], args["nembeds"])
print("Here is our Wide-and-Deep architecture so far:\n")
print(model.summary())
trainds = load_dataset(
args["train_data_path"],
args["batch_size"],
'train')
evalds = load_dataset(
args["eval_data_path"], 1000, 'eval')
if args["eval_steps"]:
evalds = evalds.take(count=args["eval_steps"])
num_batches = args["batch_size"] * args["num_epochs"]
steps_per_epoch = args["train_examples"] // num_batches
checkpoint_path = os.path.join(args["output_dir"], "checkpoints/babyweight")
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path, verbose=1, save_weights_only=True)
history = model.fit(
trainds,
validation_data=evalds,
epochs=args["num_epochs"],
steps_per_epoch=steps_per_epoch,
verbose=2, # 0=silent, 1=progress bar, 2=one line per epoch
callbacks=[cp_callback])
EXPORT_PATH = os.path.join(
args["output_dir"], datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
tf.saved_model.save(
obj=model, export_dir=EXPORT_PATH) # with default serving function
hp_metric = history.history['val_rmse'][-1]
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='rmse',
metric_value=hp_metric,
global_step=args['num_epochs'])
print("Exported trained model to {}".format(EXPORT_PATH))
| apache-2.0 | 4,712,138,857,744,311,000 | 32.038732 | 80 | 0.655121 | false |
torrances/alchemy-api | taxonomy/create-tsv.py | 1 | 1548 | #!/usr/bin/python
from __future__ import print_function
from alchemyapi import AlchemyAPI
import argparse
import file_utils
import json
import sys
import os
from collections import namedtuple
alchemyapi = AlchemyAPI()
## ARGPARSE USAGE
## <https://docs.python.org/2/howto/argparse.html>
parser = argparse.ArgumentParser(description="Call AlchemyAPI Taxonomy")
group = parser.add_mutually_exclusive_group()
parser.add_argument("input", help="The input path for importing. This can be either a file or directory.")
parser.add_argument("output", help="The output path for writing (exporting).")
args = parser.parse_args()
print("*Input Path is {0}".format(args.input))
print("*Output Path is {0}".format(args.output))
def taxonomize(text) :
rows = []
response = alchemyapi.taxonomy('text', text)
if response['status'] == 'OK' :
parsed_json = json.loads(json.dumps(response))
for node in parsed_json['taxonomy']:
rows.append(("{0}\t{1}\t{2}\n".format(node['score'], node['label'], text)))
return rows
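# Illustrative sketch of one emitted TSV row (the score, label and text values
# are made up):
#
#   0.972<TAB>/technology and computing/software<TAB>the original input text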
def create(input, output) :
for file in file_utils.getfiles(input, "txt", True) :
text = "";
fname = file.split("/")[-1]
print("*Input Filename is {0} from file {1}".format(fname, file))
target = open("{0}/{1}.tsv".format(output, fname), "w+")
lines = [line.rstrip('\n') for line in open(file)]
for line in lines :
text = text + " " + line
for row in taxonomize(line) :
target.write(row)
for row in taxonomize(text) :
target.write(row)
    target.close()
create(args.input, args.output) | apache-2.0 | -4,775,753,114,986,026,000 | 27.685185 | 108 | 0.685401 | false |
serge-sans-paille/pythran | pythran/analyses/inlinable.py | 1 | 1221 | """ Inlinable lists functions that may be inlined. """
from pythran.passmanager import ModuleAnalysis
from pythran.analyses import Identifiers
from pythran.analyses.pure_expressions import PureExpressions
import pythran.metadata as metadata
import gast as ast
import copy
class Inlinable(ModuleAnalysis):
""" Determine set of inlinable function.
A function can be inlined if it has only one statement and doesn't
recurse on itself.
"""
def __init__(self):
self.result = dict()
super(Inlinable, self).__init__(PureExpressions)
def visit_FunctionDef(self, node):
""" Determine this function definition can be inlined. """
if len(node.body) != 1:
return
sbody = node.body[0]
if not isinstance(sbody, (ast.Call, ast.Return)):
return
# only consider static return if they are pure
if metadata.get(sbody, metadata.StaticReturn):
if sbody not in self.pure_expressions:
return
ids = self.gather(Identifiers, sbody)
        # FIXME: this wrongly marks "def foo(foo): return foo" as not inlinable
if node.name not in ids:
self.result[node.name] = copy.deepcopy(node)
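# Illustrative sketch (not part of pythran): given
#
#   def twice(x): return x + x          # one statement, no self-reference
#   def loop(n): return loop(n - 1)     # its own name appears in the body
#
# only ``twice`` would end up in ``self.result`` as inlinable.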
| bsd-3-clause | 3,000,972,377,086,779,000 | 28.780488 | 70 | 0.651106 | false |
martijnkoopman/thesis | 3_4_compress_cells.py | 1 | 2720 | # Flatten navigable space to one voxel thick layer
# For walking and driving actors
# May 2016 - Martijn Koopman
# Input
idi = self.GetInput()
dims = idi.GetDimensions()
numTuples = dims[0]*dims[1]*dims[2]
cellsArr = idi.GetPointData().GetArray('cell_id')
typesArr = idi.GetPointData().GetArray('cell_type')
sizesArr = idi.GetPointData().GetArray('cell_size')
# Output
ido = self.GetOutput()
outCellsArr = vtk.vtkTypeUInt32Array()
outCellsArr.SetName('cell_id')
outCellsArr.SetNumberOfComponents(1)
outCellsArr.SetNumberOfTuples(numTuples)
outTypesArr = vtk.vtkUnsignedCharArray()
outTypesArr.SetName('cell_type')
outTypesArr.SetNumberOfComponents(1)
outTypesArr.SetNumberOfTuples(numTuples)
outSizesArr = vtk.vtkTypeUInt32Array()
outSizesArr.SetName('cell_size')
outSizesArr.SetNumberOfComponents(1)
outSizesArr.SetNumberOfTuples(numTuples)
# Copy old values
for i in range(0, numTuples):
outCellsArr.SetValue(i, 0)
outTypesArr.SetValue(i, 0)
outSizesArr.SetValue(i, 0)
# Utility functions
def GetArrValue(arr, pos):
if pos[0] < 0 or pos[0] >= dims[0] or pos[1] < 0 or pos[1] >= dims[1] or pos[2] < 0 or pos[2] >= dims[2]:
return 0
else:
i = pos[0] + (pos[1] * dims[0]) + (pos[2] * dims[0] * dims[1])
return arr.GetValue(i)
def SetArrValue(arr, pos, val):
if pos[0] < 0 or pos[0] >= dims[0] or pos[1] < 0 or pos[1] >= dims[1] or pos[2] < 0 or pos[2] >= dims[2]:
return
i = pos[0] + (pos[1] * dims[0]) + (pos[2] * dims[0] * dims[1])
arr.SetValue(i, val)
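# Worked example of the flat indexing used above (illustrative): for
# dims = (4, 3, 2), the voxel at position (1, 2, 1) maps to
#   i = 1 + 2*4 + 1*4*3 = 21
# in the point-data arrays.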
# Flatten cells
cell_voxels = {}
for x in range(dims[0]):
for y in range(dims[1]):
for z in range(dims[2]):
pos = (x,y,z)
cell_id = GetArrValue(cellsArr, pos)
cell_id_underneath = GetArrValue(cellsArr, (x,y,z-1))
cell_type = GetArrValue(typesArr, pos)
if cell_id > 0 and cell_id_underneath == 0:
SetArrValue(outCellsArr, (x,y,z), cell_id)
SetArrValue(outTypesArr, (x,y,z), cell_type)
SetArrValue(outSizesArr, (x,y,z), 0)
if not cell_id in cell_voxels:
cell_voxels[cell_id] = [pos]
else:
cell_voxels[cell_id].append(pos)
# Compute new sizes
cell_sizes = {}
for cell_id in cell_voxels:
voxels = cell_voxels[cell_id]
cell_sizes[cell_id] = len(voxels)
for voxel in voxels:
SetArrValue(outSizesArr, voxel, len(voxels))
ido.GetPointData().AddArray(outCellsArr)
ido.GetPointData().AddArray(outTypesArr)
ido.GetPointData().AddArray(outSizesArr) | gpl-3.0 | 7,206,716,420,459,701,000 | 31.604938 | 109 | 0.606985 | false |
M157q/zdict | zdict/dictionaries/moe.py | 1 | 5222 | import json
import unicodedata # to detect Unicode category
from zdict.dictionary import DictBase
from zdict.exceptions import QueryError, NotFoundError
from zdict.models import Record
class MoeDict(DictBase):
API = 'https://www.moedict.tw/uni/{word}'
@property
def provider(self):
return 'moe'
@property
def title(self):
return '萌典'
def _get_url(self, word) -> str:
return self.API.format(word=word)
def show(self, record: Record):
content = json.loads(record.content)
# print word
self.color.print(content.get('title', ''), 'yellow')
for word in content.get('heteronyms', ''):
# print pronounce
for key, display in (
('bopomofo', '注音'),
('bopomofo2', '注音二式'),
('pinyin', '漢語拼音')
):
self.color.print(display, end='')
self.color.print(
'[' + word.get(key, '') + ']',
'lwhite',
end=' ',
)
print()
print()
# print explain
for count, explain in enumerate(word.get('definitions', '')):
self.color.print(
'{order}. {text}'.format(
order=count + 1,
text=explain.get('def', '')
),
)
if explain.get('synonyms'):
self.color.print(
'同義詞: {text}'.format(text=explain['synonyms']),
'magenta',
indent=2,
)
if explain.get('antonyms'):
self.color.print(
'反義詞: {text}'.format(text=explain['antonyms']),
'magenta',
indent=2,
)
for example in explain.get('example', ''):
self.color.print(
example,
'indigo',
indent=2,
)
for quote in explain.get('quote', ''):
self.color.print(
'[引用] {text}'.format(text=quote),
'green',
indent=2,
)
print()
def query(self, word: str):
try:
content = self._get_raw(word)
except QueryError as exception:
raise NotFoundError(exception.word)
record = Record(
word=word,
content=content,
source=self.provider,
)
return record
def is_other_format(char):
return unicodedata.category(char) != 'Cf'
def remove_cf(data):
return ''.join(filter(is_other_format, data))
def clean(data, clean_cf=False):
'''
Clean the word segmentation
remove "`~" and things in Unicode 'Cf' category
'''
data = data.translate(str.maketrans('', '', '`~'))
if clean_cf:
return remove_cf(data)
else:
return data
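# Illustrative sketch (made-up input): clean("`gua`-`lang~") returns "gua-lang"
# because the "`" and "~" segmentation markers are dropped; clean(data, True)
# additionally strips zero-width characters in the Unicode "Cf" category.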
class MoeDictTaiwanese(DictBase):
API = 'https://www.moedict.tw/t/{word}.json'
@property
def provider(self):
return 'moe-taiwanese'
@property
def title(self):
return '萌典(臺)'
def _get_url(self, word) -> str:
return self.API.format(word=word)
def show(self, record: Record):
content = json.loads(record.content)
# print word
self.color.print(clean(content.get('t', '')), 'yellow')
for word in content.get('h', ''):
# print pronounce
for key, display in (
# TODO: where is bopomofo ?
('T', '臺羅拼音'), # Tailo
):
self.color.print(display, end='')
self.color.print(
'[' + word.get(key, '') + ']',
'lwhite',
end=' ',
)
print()
print()
# print explain
for count, explain in enumerate(word.get('d', '')):
self.color.print('{order}. '.format(order=count + 1), end='')
type = clean(explain.get('type', ''))
if type:
self.color.print(
'[' + type + ']',
'lgreen',
end=' ',
)
self.color.print(clean(explain.get('f', '')), end='')
for example in explain.get('e', ''):
self.color.print(
clean(example, True),
'indigo',
indent=2,
)
print()
print()
def query(self, word: str):
try:
content = self._get_raw(word)
except QueryError as exception:
raise NotFoundError(exception.word)
record = Record(
word=word,
content=content,
source=self.provider,
)
return record
| gpl-3.0 | -3,188,912,346,079,816,000 | 25.080808 | 77 | 0.431255 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/apport/general-hooks/ubuntu.py | 1 | 14954 | '''Attach generally useful information, not specific to any package.
Copyright (C) 2009 Canonical Ltd.
Authors: Matt Zimmerman <[email protected]>,
Brian Murray <[email protected]>
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version. See http://www.gnu.org/copyleft/gpl.html for
the full text of the license.
'''
import apport.packaging
import re, os, os.path, pwd, time
from urlparse import urljoin
from urllib2 import urlopen
from apport.hookutils import *
from apport import unicode_gettext as _
def add_info(report, ui):
add_release_info(report)
add_kernel_info(report)
add_cloud_info(report)
report['ApportVersion'] = apport.packaging.get_version('apport')
if report.get('ProblemType') == 'Package':
check_for_disk_error(report)
match_error_messages(report)
for log in ['DpkgTerminalLog', 'VarLogDistupgradeApttermlog']:
if log in report:
check_attachment_for_errors(report, log)
wrong_grub_msg = _('''Your system was initially configured with grub version 2, but you have removed it from your system in favor of grub 1 without configuring it. To ensure your bootloader configuration is updated whenever a new kernel is available, open a terminal and run:
sudo apt-get install grub-pc
''')
if 'DpkgTerminalLog' in report \
and re.search(r'^Not creating /boot/grub/menu.lst as you wish', report['DpkgTerminalLog'], re.MULTILINE):
grub_hook_failure = True
else:
grub_hook_failure = False
# crash reports from live system installer often expose target mount
for f in ('ExecutablePath', 'InterpreterPath'):
if f in report and report[f].startswith('/target/'):
report[f] = report[f][7:]
# Allow filing update-manager bugs with obsolete packages
if report.get('Package', '').startswith('update-manager'):
os.environ['APPORT_IGNORE_OBSOLETE_PACKAGES'] = '1'
# file bugs against OEM project for modified packages
if 'Package' in report:
v = report['Package'].split()[1]
oem_project = get_oem_project(report)
if oem_project and ('common' in v or oem_project in v):
report['CrashDB'] = 'canonical-oem'
if 'Package' in report:
package = report['Package'].split()[0]
if package:
attach_conffiles(report, package, ui=ui)
# do not file bugs against "upgrade-system" if it is not installed (LP#404727)
if package == 'upgrade-system' and 'not installed' in report['Package']:
report['UnreportableReason'] = 'You do not have the upgrade-system package installed. Please report package upgrade failures against the package that failed to install, or against upgrade-manager.'
if 'Package' in report:
package = report['Package'].split()[0]
if package:
attach_upstart_overrides(report, package)
# build a duplicate signature tag for package reports
if report.get('ProblemType') == 'Package':
if 'DpkgTerminalLog' in report:
termlog = report['DpkgTerminalLog']
elif 'VarLogDistupgradeApttermlog' in report:
termlog = report['VarLogDistupgradeApttermlog']
else:
termlog = None
if termlog:
dupe_sig = ''
for line in termlog.split('\n'):
if line.startswith('Setting up') or line.startswith('Unpacking'):
dupe_sig = '%s\n' % line
continue
dupe_sig += '%s\n' % line
if 'dpkg: error' in dupe_sig and line.startswith(' '):
if 'trying to overwrite' in line:
conflict_pkg = re.search('in package (.*) ', line)
if conflict_pkg and not apport.packaging.is_distro_package(conflict_pkg.group(1)):
report['UnreportableReason'] = _('An Ubuntu package has a file conflict with a package that is not a genuine Ubuntu package')
add_tag(report, 'package-conflict')
if 'Setting up' in dupe_sig or 'Unpacking' in dupe_sig:
report['DuplicateSignature'] = dupe_sig
# the duplicate signature should be the first failure
break
# running Unity?
username = pwd.getpwuid(os.geteuid()).pw_name
if subprocess.call(['killall', '-s0', '-u', username,
'unity-panel-service'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) == 0:
add_tag(report, 'running-unity')
def match_error_messages(report):
# There are enough of these now that it is probably worth refactoring...
# -mdz
if report['ProblemType'] == 'Package':
if 'failed to install/upgrade: corrupted filesystem tarfile' in report.get('Title', ''):
report['UnreportableReason'] = 'This failure was caused by a corrupted package download or file system corruption.'
if 'is already installed and configured' in report.get('ErrorMessage', ''):
report['SourcePackage'] = 'dpkg'
def check_attachment_for_errors(report, attachment):
    # wrong_grub_msg and grub_hook_failure are also computed in add_info();
    # recompute them here so this module-level helper does not depend on
    # add_info()'s local variables.
    wrong_grub_msg = _('''Your system was initially configured with grub version 2, but you have removed it from your system in favor of grub 1 without configuring it. To ensure your bootloader configuration is updated whenever a new kernel is available, open a terminal and run:
      sudo apt-get install grub-pc
''')
    if 'DpkgTerminalLog' in report \
       and re.search(r'^Not creating /boot/grub/menu.lst as you wish', report['DpkgTerminalLog'], re.MULTILINE):
        grub_hook_failure = True
    else:
        grub_hook_failure = False
    if report['ProblemType'] == 'Package':
        trim_dpkg_log(report)
if report['Package'] not in ['grub', 'grub2']:
# linux-image postinst emits this when update-grub fails
# https://wiki.ubuntu.com/KernelTeam/DebuggingUpdateErrors
grub_errors = [r'^User postinst hook script \[.*update-grub\] exited with value',
r'^run-parts: /etc/kernel/post(inst|rm).d/zz-update-grub exited with return code [1-9]+',
r'^/usr/sbin/grub-probe: error']
for grub_error in grub_errors:
if attachment in report and re.search(grub_error, report[attachment], re.MULTILINE):
# File these reports on the grub package instead
grub_package = apport.packaging.get_file_package('/usr/sbin/update-grub')
if grub_package is None or grub_package == 'grub' and not 'grub-probe' in report[attachment]:
report['SourcePackage'] = 'grub'
if os.path.exists('/boot/grub/grub.cfg') \
and grub_hook_failure:
report['UnreportableReason'] = wrong_grub_msg
else:
report['SourcePackage'] = 'grub2'
if report['Package'] != 'initramfs-tools':
# update-initramfs emits this when it fails, usually invoked from the linux-image postinst
# https://wiki.ubuntu.com/KernelTeam/DebuggingUpdateErrors
if attachment in report and re.search(r'^update-initramfs: failed for ', report[attachment], re.MULTILINE):
# File these reports on the initramfs-tools package instead
report['SourcePackage'] = 'initramfs-tools'
if report['Package'] in ['emacs22', 'emacs23', 'emacs-snapshot', 'xemacs21']:
# emacs add-on packages trigger byte compilation, which might fail
# we are very interested in reading the compilation log to determine
# where to reassign this report to
regex = r'^!! Byte-compilation for x?emacs\S+ failed!'
if attachment in report and re.search(regex, report[attachment], re.MULTILINE):
for line in report[attachment].split('\n'):
m = re.search(r'^!! and attach the file (\S+)', line)
if m:
path = m.group(1)
attach_file_if_exists(report, path)
if report['Package'].startswith('linux-image-') and attachment in report:
# /etc/kernel/*.d failures from kernel package postinst
m = re.search(r'^run-parts: (/etc/kernel/\S+\.d/\S+) exited with return code \d+', report[attachment], re.MULTILINE)
if m:
path = m.group(1)
package = apport.packaging.get_file_package(path)
if package:
report['SourcePackage'] = package
report['ErrorMessage'] = m.group(0)
if package == 'grub-pc' and grub_hook_failure:
report['UnreportableReason'] = wrong_grub_msg
else:
report['UnreportableReason'] = 'This failure was caused by a program which did not originate from Ubuntu'
if 'failed to install/upgrade: corrupted filesystem tarfile' in report.get('Title', ''):
report['UnreportableReason'] = 'This failure was caused by a corrupted package download or file system corruption.'
if 'is already installed and configured' in report.get('ErrorMessage', ''):
report['SourcePackage'] = 'dpkg'
def check_for_disk_error(report):
devs_to_check = []
if not 'Dmesg.txt' in report and not 'CurrentDmesg.txt' in report:
return
if not 'Df.txt' in report:
return
df = report['Df.txt']
    for line in df.splitlines():
line = line.strip('\n')
if line.endswith('/') or line.endswith('/usr') or line.endswith('/var'):
# without manipulation it'd look like /dev/sda1
device = line.split(' ')[0].strip('0123456789')
device = device.replace('/dev/', '')
devs_to_check.append(device)
dmesg = report.get('CurrentDmesg.txt', report['Dmesg.txt'])
    for line in dmesg.splitlines():
line = line.strip('\n')
if 'I/O error' in line:
# no device in this line
if 'journal commit I/O error' in line:
continue
if not 'JBD2' in line:
error_device = line.split(' ')[6].strip(',')
elif 'JBD2' in line:
error_device = line.split(' ')[-1].split('-')[0]
error_device = error_device.strip('0123456789')
if error_device in devs_to_check:
report['UnreportableReason'] = 'This failure was caused by a hardware error on /dev/%s' % error_device
def add_kernel_info(report):
# This includes the Ubuntu packaged kernel version
attach_file_if_exists(report, '/proc/version_signature', 'ProcVersionSignature')
def add_release_info(report):
# https://bugs.launchpad.net/bugs/364649
attach_file_if_exists(report, '/var/log/installer/media-info',
'InstallationMedia')
# if we are running from a live system, add the build timestamp
attach_file_if_exists(report, '/cdrom/.disk/info', 'LiveMediaBuild')
if os.path.exists('/cdrom/.disk/info'):
report['CasperVersion'] = apport.packaging.get_version('casper')
# https://wiki.ubuntu.com/FoundationsTeam/Specs/OemTrackingId
attach_file_if_exists(report, '/var/lib/ubuntu_dist_channel',
'DistributionChannelDescriptor')
release_codename = command_output(['lsb_release', '-sc'])
if release_codename.startswith('Error'):
release_codename = None
else:
add_tag(report, release_codename)
log ='/var/log/dist-upgrade/apt.log'
if os.path.exists(log):
mtime = os.stat(log).st_mtime
human_mtime = time.strftime('%Y-%m-%d', time.gmtime(mtime))
delta = time.time() - mtime
# Would be nice if this also showed which release was originally installed
report['UpgradeStatus'] = 'Upgraded to %s on %s (%d days ago)' % (release_codename, human_mtime, delta / 86400)
else:
report['UpgradeStatus'] = 'No upgrade log present (probably fresh install)'
def add_cloud_info(report):
# EC2 and Ubuntu Enterprise Cloud instances
ec2_instance = False
for pkg in ('ec2-init', 'cloud-init'):
try:
if apport.packaging.get_version(pkg):
ec2_instance = True
break
except ValueError:
pass
if ec2_instance:
metadata_url = 'http://169.254.169.254/latest/meta-data/'
ami_id_url = urljoin(metadata_url, 'ami-id')
try:
ami = urlopen(ami_id_url).read()
except:
ami = None
if ami is None:
cloud = None
elif ami.startswith('ami'):
cloud = 'ec2'
add_tag(report, 'ec2-images')
fields = { 'Ec2AMIManifest':'ami-manifest-path',
'Ec2Kernel':'kernel-id',
'Ec2Ramdisk':'ramdisk-id',
'Ec2InstanceType':'instance-type',
'Ec2AvailabilityZone':'placement/availability-zone' }
report['Ec2AMI'] = ami
for key,value in fields.items():
try:
report[key]=urlopen(urljoin(metadata_url, value)).read()
except:
report[key]='unavailable'
else:
cloud = 'uec'
add_tag(report, 'uec-images')
def add_tag(report, tag):
report.setdefault('Tags', '')
report['Tags'] += ' ' + tag
def get_oem_project(report):
'''Determine OEM project name from Distribution Channel Descriptor
Return None if it cannot be determined or does not exist.
'''
dcd = report.get('DistributionChannelDescriptor', None)
if dcd and dcd.startswith('canonical-oem-'):
return dcd.split('-')[2]
return None
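# Illustrative sketch (the descriptor value below is made up): a
# DistributionChannelDescriptor of "canonical-oem-acme-precise-amd64-20120101"
# yields the OEM project name "acme".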
def trim_dpkg_log(report):
'''Trim DpkgTerminalLog to the most recent installation session.'''
if 'DpkgTerminalLog' not in report:
return
lines = []
trim_re = re.compile('^\(.* ... \d+ .*\)$')
for line in report['DpkgTerminalLog'].splitlines():
if line.startswith('Log started: ') or trim_re.match(line):
lines = []
continue
lines.append(line)
report['DpkgTerminalLog'] = '\n'.join(lines)
if not report['DpkgTerminalLog'].strip():
report['UnreportableReason'] = '/var/log/apt/term.log does not contain any data'
if __name__ == '__main__':
import sys
# for testing: update report file given on command line
if len(sys.argv) != 2:
print >> sys.stderr, 'Usage for testing this hook: %s <report file>' % sys.argv[0]
sys.exit(1)
report_file = sys.argv[1]
report = apport.Report()
report.load(open(report_file))
report_keys = set(report.keys())
new_report = report.copy()
add_info(new_report, None)
new_report_keys = set(new_report.keys())
# Show differences
changed = 0
for key in sorted(report_keys | new_report_keys):
if key in new_report_keys and key not in report_keys:
print "+%s: %s" % (key, new_report[key])
changed += 1
elif key in report_keys and key not in new_report_keys:
print "-%s: (deleted)" % key
changed += 1
print "%d items changed" % changed
| gpl-3.0 | 9,189,838,139,298,959,000 | 41.482955 | 280 | 0.600976 | false |
rougier/Neurosciences | basal-ganglia/topalidou-et-al-2014/cython/parameters.py | 1 | 1846 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Nicolas P. Rougier
# Distributed under the (new) BSD License.
#
# Contributors: Nicolas P. Rougier ([email protected])
# -----------------------------------------------------------------------------
# --- Time ---
ms = 0.001
dt = 1*ms
tau = 10*ms
# --- Learning ---
alpha_CUE = 0.050
alpha_LTP = 0.002
alpha_LTD = 0.001
# --- Sigmoid ---
Vmin = 0
Vmax = 20
Vh = 16
Vc = 3
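# Illustrative sketch (an assumption, not part of the original parameter set):
# Vmin, Vmax, Vh and Vc are the constants of a Boltzmann-style sigmoid
# activation commonly used with this family of models.
def example_sigmoid(v):
    import math
    return Vmin + (Vmax - Vmin) / (1.0 + math.exp((Vh - v) / Vc))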
# --- Model ---
decision_threshold = 40
noise = 0.001
CTX_rest = -3.0
STR_rest = 0.0
STN_rest = -10.0
GPI_rest = 10.0
THL_rest = -40.0
# --- Cues & Rewards ---
V_cue = 7
rewards = 3/3.,2/3.,1/3.,0/3.
# -- Weight ---
Wmin = 0.25
Wmax = 0.75
gains = { "CTX.cog -> STR.cog" : +1.0,
"CTX.mot -> STR.mot" : +1.0,
"CTX.ass -> STR.ass" : +1.0,
"CTX.cog -> STR.ass" : +0.2,
"CTX.mot -> STR.ass" : +0.2,
"CTX.cog -> STN.cog" : +1.0,
"CTX.mot -> STN.mot" : +1.0,
"STR.cog -> GPI.cog" : -2.0,
"STR.mot -> GPI.mot" : -2.0,
"STR.ass -> GPI.cog" : -2.0,
"STR.ass -> GPI.mot" : -2.0,
"STN.cog -> GPI.cog" : +1.0,
"STN.mot -> GPI.mot" : +1.0,
"GPI.cog -> THL.cog" : -0.25,
"GPI.mot -> THL.mot" : -0.25,
"THL.cog -> CTX.cog" : +0.4,
"THL.mot -> CTX.mot" : +0.4,
"CTX.cog -> THL.cog" : +0.1,
"CTX.mot -> THL.mot" : +0.1,
"CTX.mot -> CTX.mot" : +0.5,
"CTX.cog -> CTX.cog" : +0.5,
"CTX.ass -> CTX.ass" : +0.5,
"CTX.ass -> CTX.cog" : +0.01,
"CTX.ass -> CTX.mot" : +0.025,
"CTX.cog -> CTX.ass" : +0.025,
"CTX.mot -> CTX.ass" : +0.01,
}
| bsd-3-clause | -2,140,185,102,197,079,600 | 25.371429 | 79 | 0.389491 | false |
qiubit/luminis | backend/api/series_attribute.py | 1 | 3125 | #
# Copyright (C) 2017 Rafał Michaluk
# Copyright (C) 2017 Dominik Murzynowski
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
import time
from pycnic.core import Handler
from pycnic.utils import requires_validation
from voluptuous import Schema, Required, Or
from .validators import non_empty_string, assert_attribute_does_not_exist
from database.model import Session, SeriesAttribute, EntityType
from database.helpers import get_all, get_one, update_last_data_modification_ts
class SeriesAttributeHandler(Handler):
def __init__(self):
self.session = Session()
def get(self, entity_type_id, ident=None):
entity_type = get_one(self.session, EntityType, id=entity_type_id)
if ident is None:
return [series.to_dict() for series in get_all(self.session, SeriesAttribute, entity_type=entity_type)]
else:
return get_one(self.session, SeriesAttribute, entity_type=entity_type, id=ident).to_dict()
@requires_validation(assert_attribute_does_not_exist(SeriesAttribute), with_route_params=True)
@requires_validation(Schema({
Required('name'): non_empty_string,
'type': Or('real', 'enum'),
'refresh_time': Or(int, None),
'is_favourite': bool,
}))
def post(self, entity_type_id):
data = self.request.data
entity_type = get_one(self.session, EntityType, id=entity_type_id)
series = SeriesAttribute(entity_type=entity_type, name=data['name'],
type=data.get('type', 'real'), refresh_time=data.get('refresh_time'),
is_favourite=data.get('is_favourite', False))
self.session.add(series)
self.session.commit()
update_last_data_modification_ts(self.session)
return {
'success': True,
'ID': series.id
}
@requires_validation(Schema({
'refresh_time': Or(int, None),
'is_favourite': bool,
}))
def put(self, entity_type_id, ident):
data = self.request.data
entity_type = get_one(self.session, EntityType, id=entity_type_id)
series = get_one(self.session, SeriesAttribute, entity_type=entity_type, id=ident)
if 'refresh_time' in data:
series.refresh_time = data['refresh_time']
if 'is_favourite' in data:
series.is_favourite = data['is_favourite']
self.session.commit()
update_last_data_modification_ts(self.session)
return {
'success': True,
'ID': series.id
}
def delete(self, entity_type_id, ident):
now = time.time()
entity_type = get_one(self.session, EntityType, id=entity_type_id) # check if route is correct
series = get_one(self.session, SeriesAttribute, entity_type=entity_type, id=ident)
series.delete_ts = now
for alert in series.alerts:
alert.delete_ts = now
self.session.commit()
update_last_data_modification_ts(self.session)
return {'success': True}
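# Illustrative payload sketch (not part of the module): the JSON body accepted
# by SeriesAttributeHandler.post; the concrete values below are assumptions.
EXAMPLE_SERIES_ATTRIBUTE_PAYLOAD = {
    'name': 'temperature',   # required, non-empty string
    'type': 'real',          # optional, 'real' (default) or 'enum'
    'refresh_time': 60,      # optional, seconds or None
    'is_favourite': False,   # optional
}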
| mit | 3,283,949,692,257,291,000 | 37.097561 | 115 | 0.634123 | false |
madebr/ADuC832_demoproject | loader/dumphex.py | 1 | 1060 | #!/usr/bin/env python3
#=============================================================================
#Copyright (C) 2016, Anonymous Maarten
#
#This file is part of ADuC832 demoproject.
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#=============================================================================
import intelhex
import sys
ihx = intelhex.IntelHex(sys.argv[1])
ihx.padding = b'\xff'  # 0xFF filler; b'\ff' was a typo (form feed + 'f')
ihx.dump()
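# Usage sketch (illustrative): python dumphex.py firmware.hex
# where firmware.hex is an Intel HEX image; gaps between records are filled
# with the padding byte set above before dumping.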
| gpl-2.0 | 1,634,500,598,061,975,600 | 38.259259 | 80 | 0.653774 | false |
data-tsunami/museo-cachi | cachi/fields.py | 1 | 3734 | # -*- coding: utf-8 -*-
#======================================================================
# This file is part of "Museo-Cachi".
#
# Museo-Cachi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Museo-Cachi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Museo-Cachi. If not, see <http://www.gnu.org/licenses/>.
#======================================================================
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
class MultiFileInput(forms.FileInput):
    def render(self, name, value, attrs=None):
        # work on a copy so a shared/default attrs dict is never mutated
        attrs = dict(attrs or {})
        attrs['multiple'] = 'multiple'
        return super(MultiFileInput, self).render(name, None, attrs=attrs)
def value_from_datadict(self, data, files, name):
if hasattr(files, 'getlist'):
return files.getlist(name)
else:
return [files.get(name)]
class MultiFileField(forms.FileField):
widget = MultiFileInput
default_error_messages = {
'min_num': u"Mínimo %(min_num)s archivos. (Subidos: %(num_files)s).",
'max_num': u"Máximo %(max_num)s archivos. (Subidos: %(num_files)s).",
'file_size': u"%(uploaded_file_name)s, excede el tamaño de subida.",
'file_type': u"%(uploaded_file_name)s, el tipo de archivo es inválido.",
}
def __init__(self, *args, **kwargs):
self.min_num = kwargs.pop('min_num', 0)
self.max_num = kwargs.pop('max_num', None)
self.maximum_file_size = kwargs.pop('maximum_file_size', None)
super(MultiFileField, self).__init__(*args, **kwargs)
def to_python(self, data):
ret = []
for item in data:
ret.append(super(MultiFileField, self).to_python(item))
return ret
def validate(self, data):
super(MultiFileField, self).validate(data)
# num_files = len(data)
# if len(data) and not data[0]:
# num_files = 0
# if num_files < self.min_num:
# raise ValidationError(
# self.error_messages['min_num'] % {
# 'min_num': self.min_num,
# 'num_files': num_files,
# },
# code='invalid',
# )
# elif self.max_num and num_files > self.max_num:
# raise ValidationError(
# self.error_messages['max_num'] % {
# 'max_num': self.max_num,
# 'num_files': num_files,
# },
# code='invalid',
# )
# for uploaded_file in data:
# type = uploaded_file.content_type.split('/')[1]
# if not type in settings.CONTENT_TYPES:
# raise ValidationError(
# self.error_messages['file_type'] % {
# 'uploaded_file_name': uploaded_file.name,
# },
# code='invalid',
# )
# if uploaded_file.size > self.maximum_file_size:
# raise ValidationError(
# self.error_messages['file_size'] % {
# 'uploaded_file_name': uploaded_file.name,
# },
# code='invalid',
# )
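# Minimal usage sketch (hypothetical, not part of the original module): a form
# accepting between 1 and 5 uploaded files; the 5 MB limit is an assumption.
class AdjuntosForm(forms.Form):
    archivos = MultiFileField(min_num=1, max_num=5,
                              maximum_file_size=5 * 1024 * 1024)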
| gpl-3.0 | -3,690,911,213,907,208,000 | 36.676768 | 80 | 0.530831 | false |
odoo-brazil/l10n-brazil-wip | sped/models/sped_documento.py | 1 | 40174 | # -*- coding: utf-8 -*-
#
# Copyright 2016 Taŭga Tecnologia
# Aristides Caldeira <[email protected]>
# License AGPL-3 or later (http://www.gnu.org/licenses/agpl)
#
from __future__ import division, print_function, unicode_literals
import logging
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
from odoo.addons.l10n_br_base.models.sped_base import SpedBase
from odoo.addons.l10n_br_base.constante_tributaria import (
TIPO_EMISSAO_NFE,
TIPO_EMISSAO,
MODELO_FISCAL,
ENTRADA_SAIDA,
ENTRADA_SAIDA_SAIDA,
SITUACAO_FISCAL,
SITUACAO_FISCAL_REGULAR,
AMBIENTE_NFE,
AMBIENTE_NFE_HOMOLOGACAO,
SITUACAO_NFE_AUTORIZADA,
TIPO_CONSUMIDOR_FINAL_CONSUMIDOR_FINAL,
INDICADOR_IE_DESTINATARIO,
TIPO_CONSUMIDOR_FINAL_NORMAL,
MODELO_FISCAL_NFCE,
MODELO_FISCAL_NFE,
MODELO_FISCAL_NFSE,
TIPO_EMISSAO_PROPRIA,
AMBIENTE_NFE_PRODUCAO,
TIPO_EMISSAO_NFE_NORMAL,
ENTRADA_SAIDA_ENTRADA,
ENTRADA_SAIDA_DICT,
TIPO_EMISSAO_DICT,
IE_DESTINATARIO,
ST_ISS,
NATUREZA_TRIBUTACAO_NFSE,
LIMITE_RETENCAO_PIS_COFINS_CSLL,
MODALIDADE_FRETE_DESTINATARIO_PROPRIO,
MODALIDADE_FRETE,
INDICADOR_PRESENCA_COMPRADOR_NAO_SE_APLICA,
INDICADOR_PRESENCA_COMPRADOR,
TIPO_CONSUMIDOR_FINAL,
FINALIDADE_NFE_NORMAL,
FINALIDADE_NFE,
IND_FORMA_PAGAMENTO,
IND_FORMA_PAGAMENTO_A_VISTA,
REGIME_TRIBUTARIO,
REGIME_TRIBUTARIO_SIMPLES,
INDICADOR_IE_DESTINATARIO_CONTRIBUINTE,
)
_logger = logging.getLogger(__name__)
try:
from pybrasil.data import parse_datetime, data_hora_horario_brasilia, \
formata_data
from pybrasil.valor.decimal import Decimal as D
from pybrasil.valor import formata_valor
except (ImportError, IOError) as err:
_logger.debug(err)
class SpedDocumento(SpedBase, models.Model):
_name = b'sped.documento'
_description = 'Documentos Fiscais'
_inherit = ['mail.thread']
_order = 'emissao, modelo, data_emissao desc, serie, numero desc'
_rec_name = 'descricao'
descricao = fields.Char(
string='Documento Fiscal',
compute='_compute_descricao',
)
empresa_id = fields.Many2one(
comodel_name='sped.empresa',
string='Empresa',
ondelete='restrict',
default=lambda self:
self.env['sped.empresa']._empresa_ativa('sped.documento')
)
empresa_cnpj_cpf = fields.Char(
string='CNPJ/CPF',
size=18,
related='empresa_id.cnpj_cpf',
readonly=True,
)
emissao = fields.Selection(
selection=TIPO_EMISSAO,
string='Tipo de emissão',
index=True,
)
modelo = fields.Selection(
selection=MODELO_FISCAL,
string='Modelo',
index=True,
)
data_hora_emissao = fields.Datetime(
string='Data de emissão',
index=True,
default=fields.Datetime.now,
)
data_hora_entrada_saida = fields.Datetime(
string='Data de entrada/saída',
index=True,
default=fields.Datetime.now,
)
data_emissao = fields.Date(
string='Data de emissão',
compute='_compute_data_hora_separadas',
store=True,
index=True,
)
hora_emissao = fields.Char(
string='Hora de emissão',
size=8,
compute='_compute_data_hora_separadas',
store=True,
)
data_entrada_saida = fields.Date(
string='Data de entrada/saída',
compute='_compute_data_hora_separadas',
store=True,
index=True,
)
hora_entrada_saida = fields.Char(
string='Hora de entrada/saída',
size=8,
compute='_compute_data_hora_separadas',
store=True,
)
serie = fields.Char(
string='Série',
size=3,
index=True,
)
numero = fields.Float(
string='Número',
index=True,
digits=(18, 0),
)
entrada_saida = fields.Selection(
selection=ENTRADA_SAIDA,
string='Entrada/Saída',
index=True,
default=ENTRADA_SAIDA_SAIDA,
)
situacao_fiscal = fields.Selection(
selection=SITUACAO_FISCAL,
string='Situação fiscal',
index=True,
default=SITUACAO_FISCAL_REGULAR,
)
ambiente_nfe = fields.Selection(
selection=AMBIENTE_NFE,
string='Ambiente da NF-e',
index=True,
default=AMBIENTE_NFE_HOMOLOGACAO,
)
tipo_emissao_nfe = fields.Selection(
selection=TIPO_EMISSAO_NFE,
string='Tipo de emissão da NF-e',
default=TIPO_EMISSAO_NFE_NORMAL,
)
ie_st = fields.Char(
string='IE do substituto tributário',
size=14,
)
municipio_fato_gerador_id = fields.Many2one(
comodel_name='sped.municipio',
string='Município do fato gerador',
)
operacao_id = fields.Many2one(
comodel_name='sped.operacao',
string='Operação',
ondelete='restrict',
)
#
# Campos da operação
#
regime_tributario = fields.Selection(
selection=REGIME_TRIBUTARIO,
string='Regime tributário',
default=REGIME_TRIBUTARIO_SIMPLES,
)
ind_forma_pagamento = fields.Selection(
selection=IND_FORMA_PAGAMENTO,
string='Tipo de pagamento',
default=IND_FORMA_PAGAMENTO_A_VISTA,
)
finalidade_nfe = fields.Selection(
selection=FINALIDADE_NFE,
string='Finalidade da NF-e',
default=FINALIDADE_NFE_NORMAL,
)
consumidor_final = fields.Selection(
selection=TIPO_CONSUMIDOR_FINAL,
string='Tipo do consumidor',
default=TIPO_CONSUMIDOR_FINAL_NORMAL,
)
presenca_comprador = fields.Selection(
selection=INDICADOR_PRESENCA_COMPRADOR,
string='Presença do comprador',
default=INDICADOR_PRESENCA_COMPRADOR_NAO_SE_APLICA,
)
modalidade_frete = fields.Selection(
selection=MODALIDADE_FRETE,
string='Modalidade do frete',
default=MODALIDADE_FRETE_DESTINATARIO_PROPRIO,
)
natureza_operacao_id = fields.Many2one(
comodel_name='sped.natureza.operacao',
string='Natureza da operação',
ondelete='restrict',
)
infadfisco = fields.Text(
string='Informações adicionais de interesse do fisco'
)
infcomplementar = fields.Text(
string='Informações complementares'
)
deduz_retencao = fields.Boolean(
string='Deduz retenção do total da NF?',
default=True
)
pis_cofins_retido = fields.Boolean(
string='PIS-COFINS retidos?'
)
al_pis_retido = fields.Monetary(
string='Alíquota do PIS',
default=0.65,
digits=(5, 2),
currency_field='currency_aliquota_id',
)
al_cofins_retido = fields.Monetary(
string='Alíquota da COFINS',
default=3,
digits=(5, 2),
currency_field='currency_aliquota_id',
)
csll_retido = fields.Boolean(
string='CSLL retido?',
)
al_csll = fields.Monetary(
string='Alíquota da CSLL',
default=1,
digits=(5, 2),
currency_field='currency_aliquota_id',
)
limite_retencao_pis_cofins_csll = fields.Monetary(
string='Obedecer limite de faturamento para retenção de',
default=LIMITE_RETENCAO_PIS_COFINS_CSLL,
)
irrf_retido = fields.Boolean(
string='IR retido?',
)
irrf_retido_ignora_limite = fields.Boolean(
string='IR retido ignora limite de R$ 10,00?',
)
al_irrf = fields.Monetary(
string='Alíquota do IR',
default=1,
digits=(5, 2),
currency_field='currency_aliquota_id',
)
inss_retido = fields.Boolean(
string='INSS retido?',
index=True,
)
al_inss_retido = fields.Monetary(
string='Alíquota do INSS',
digits=(5, 2),
currency_field='currency_aliquota_id',
)
al_inss = fields.Monetary(
string='Alíquota do INSS',
digits=(5, 2),
currency_field='currency_aliquota_id',
)
cnae_id = fields.Many2one(
comodel_name='sped.cnae',
string='CNAE',
)
natureza_tributacao_nfse = fields.Selection(
selection=NATUREZA_TRIBUTACAO_NFSE,
string='Natureza da tributação',
)
servico_id = fields.Many2one(
comodel_name='sped.servico',
string='Serviço',
)
cst_iss = fields.Selection(
selection=ST_ISS,
string='CST ISS',
)
#
# Destinatário/Remetente
#
participante_id = fields.Many2one(
comodel_name='sped.participante',
string='Destinatário/Remetente',
ondelete='restrict',
)
participante_cnpj_cpf = fields.Char(
string='CNPJ/CPF',
size=18,
related='participante_id.cnpj_cpf',
readonly=True,
)
participante_tipo_pessoa = fields.Char(
string='Tipo pessoa',
size=1,
related='participante_id.tipo_pessoa',
readonly=True,
)
participante_razao_social = fields.Char(
string='Razão Social',
size=60,
related='participante_id.razao_social',
readonly=True,
)
participante_fantasia = fields.Char(
string='Fantasia',
size=60,
related='participante_id.fantasia',
readonly=True,
)
participante_endereco = fields.Char(
string='Endereço',
size=60,
related='participante_id.endereco',
readonly=True,
)
participante_numero = fields.Char(
string='Número',
size=60,
related='participante_id.numero',
readonly=True,
)
participante_complemento = fields.Char(
string='Complemento',
size=60,
related='participante_id.complemento',
readonly=True,
)
participante_bairro = fields.Char(
string='Bairro',
size=60,
related='participante_id.bairro',
readonly=True,
)
participante_municipio_id = fields.Many2one(
comodel_name='sped.municipio',
string='Município',
related='participante_id.municipio_id',
readonly=True,
)
participante_cidade = fields.Char(
string='Município',
related='participante_id.cidade',
readonly=True,
)
participante_estado = fields.Char(
string='Estado',
related='participante_id.estado',
readonly=True,
)
participante_cep = fields.Char(
string='CEP',
size=9,
related='participante_id.cep',
readonly=True,
)
#
# Telefone e email para a emissão da NF-e
#
participante_fone = fields.Char(
string='Fone',
size=18,
related='participante_id.fone',
readonly=True,
)
participante_fone_comercial = fields.Char(
string='Fone Comercial',
size=18,
related='participante_id.fone_comercial',
readonly=True,
)
participante_celular = fields.Char(
string='Celular',
size=18,
related='participante_id.celular',
readonly=True,
)
participante_email = fields.Char(
string='Email',
size=60,
related='participante_id.email',
readonly=True,
)
#
# Inscrições e documentos
#
participante_contribuinte = fields.Selection(
selection=IE_DESTINATARIO,
string='Contribuinte',
default='2',
related='participante_id.contribuinte',
readonly=True,
)
participante_ie = fields.Char(
string='Inscrição estadual',
size=18,
related='participante_id.ie',
readonly=True,
)
participante_eh_orgao_publico = fields.Boolean(
string='É órgão público?',
related='participante_id.eh_orgao_publico',
readonly=True,
)
#
# Chave e validação da chave
#
chave = fields.Char(
string='Chave',
size=44,
)
#
# Duplicatas e pagamentos
#
payment_term_id = fields.Many2one(
comodel_name='account.payment.term',
string='Forma de pagamento',
ondelete='restrict',
domain=[('forma_pagamento', '!=', False)],
)
duplicata_ids = fields.One2many(
comodel_name='sped.documento.duplicata',
inverse_name='documento_id',
string='Duplicatas',
)
pagamento_ids = fields.One2many(
comodel_name='sped.documento.pagamento',
inverse_name='documento_id',
string='Pagamentos',
)
#
# Transporte
#
transportadora_id = fields.Many2one(
comodel_name='res.partner',
string='Transportadora',
ondelete='restrict',
domain=[['cnpj_cpf', '!=', False]],
)
veiculo_id = fields.Many2one(
comodel_name='sped.veiculo',
string='Veículo',
ondelete='restrict',
)
reboque_1_id = fields.Many2one(
comodel_name='sped.veiculo',
string='Reboque 1',
ondelete='restrict',
)
reboque_2_id = fields.Many2one(
comodel_name='sped.veiculo',
string='Reboque 2',
ondelete='restrict',
)
reboque_3_id = fields.Many2one(
comodel_name='sped.veiculo',
string='Reboque 3',
ondelete='restrict',
)
reboque_4_id = fields.Many2one(
comodel_name='sped.veiculo',
string='Reboque 4',
ondelete='restrict',
)
reboque_5_id = fields.Many2one(
comodel_name='sped.veiculo',
string='Reboque 5',
ondelete='restrict',
)
volume_ids = fields.One2many(
comodel_name='sped.documento.volume',
inverse_name='documento_id',
string='Volumes'
)
#
# Exportação
#
exportacao_estado_embarque_id = fields.Many2one(
comodel_name='sped.estado',
string='Estado do embarque',
ondelete='restrict',
)
exportacao_local_embarque = fields.Char(
string='Local do embarque',
size=60,
)
#
# Compras públicas
#
compra_nota_empenho = fields.Char(
string='Identificação da nota de empenho (compra pública)',
size=17,
)
compra_pedido = fields.Char(
string='Pedido (compra pública)',
size=60,
)
compra_contrato = fields.Char(
string='Contrato (compra pública)',
size=60,
)
#
# Totais dos itens
#
# Valor total dos produtos
vr_produtos = fields.Monetary(
string='Valor dos produtos/serviços',
compute='_compute_soma_itens',
store=True,
)
vr_produtos_tributacao = fields.Monetary(
string='Valor dos produtos para tributação',
compute='_compute_soma_itens',
store=True,
)
vr_frete = fields.Monetary(
string='Valor do frete',
compute='_compute_soma_itens',
store=True,
)
vr_seguro = fields.Monetary(
string='Valor do seguro',
compute='_compute_soma_itens',
store=True,
)
vr_desconto = fields.Monetary(
string='Valor do desconto',
compute='_compute_soma_itens',
store=True,
)
vr_outras = fields.Monetary(
string='Outras despesas acessórias',
compute='_compute_soma_itens',
store=True,
)
vr_operacao = fields.Monetary(
string='Valor da operação',
compute='_compute_soma_itens',
store=True
)
vr_operacao_tributacao = fields.Monetary(
string='Valor da operação para tributação',
compute='_compute_soma_itens',
store=True
)
# ICMS próprio
bc_icms_proprio = fields.Monetary(
string='Base do ICMS próprio',
compute='_compute_soma_itens',
store=True
)
vr_icms_proprio = fields.Monetary(
string='Valor do ICMS próprio',
compute='_compute_soma_itens',
store=True
)
# ICMS SIMPLES
vr_icms_sn = fields.Monetary(
string='Valor do crédito de ICMS - SIMPLES Nacional',
compute='_compute_soma_itens',
store=True
)
vr_simples = fields.Monetary(
string='Valor do SIMPLES Nacional',
compute='_compute_soma_itens',
store=True,
)
# ICMS ST
bc_icms_st = fields.Monetary(
string='Base do ICMS ST',
compute='_compute_soma_itens',
store=True
)
vr_icms_st = fields.Monetary(
string='Valor do ICMS ST',
compute='_compute_soma_itens',
store=True,
)
# ICMS ST retido
bc_icms_st_retido = fields.Monetary(
string='Base do ICMS retido anteriormente por '
'substituição tributária',
compute='_compute_soma_itens',
store=True,
)
vr_icms_st_retido = fields.Monetary(
string='Valor do ICMS retido anteriormente por '
'substituição tributária',
compute='_compute_soma_itens',
store=True,
)
# IPI
bc_ipi = fields.Monetary(
string='Base do IPI',
compute='_compute_soma_itens',
store=True
)
vr_ipi = fields.Monetary(
string='Valor do IPI',
compute='_compute_soma_itens',
store=True,
)
# Imposto de importação
bc_ii = fields.Monetary(
string='Base do imposto de importação',
compute='_compute_soma_itens',
store=True,
)
vr_despesas_aduaneiras = fields.Monetary(
string='Despesas aduaneiras',
compute='_compute_soma_itens',
store=True,
)
vr_ii = fields.Monetary(
string='Valor do imposto de importação',
compute='_compute_soma_itens',
store=True,
)
vr_iof = fields.Monetary(
string='Valor do IOF',
compute='_compute_soma_itens',
store=True,
)
# PIS e COFINS
bc_pis_proprio = fields.Monetary(
string='Base do PIS próprio',
compute='_compute_soma_itens',
store=True,
)
vr_pis_proprio = fields.Monetary(
string='Valor do PIS próprio',
compute='_compute_soma_itens',
store=True,
)
bc_cofins_proprio = fields.Monetary(
string='Base da COFINS própria',
compute='_compute_soma_itens',
store=True,
)
vr_cofins_proprio = fields.Monetary(
string='Valor do COFINS própria',
compute='_compute_soma_itens',
store=True,
)
# bc_pis_st = fields.Monetary(
# 'Base do PIS ST', compute='_compute_soma_itens', store=True)
# vr_pis_st = fields.Monetary(
# 'Valor do PIS ST', compute='_compute_soma_itens', store=True)
# bc_cofins_st = fields.Monetary(
# 'Base da COFINS ST', compute='_compute_soma_itens', store=True)
# vr_cofins_st = fields.Monetary(
# 'Valor do COFINS ST', compute='_compute_soma_itens', store=True)
#
# Totais dos itens (grupo ISS)
#
# ISS
bc_iss = fields.Monetary(
string='Base do ISS',
compute='_compute_soma_itens',
store=True,
)
vr_iss = fields.Monetary(
string='Valor do ISS',
compute='_compute_soma_itens',
store=True,
)
# Total da NF e da fatura (podem ser diferentes no caso de operação
# triangular)
vr_nf = fields.Monetary(
string='Valor da NF',
compute='_compute_soma_itens',
store=True,
)
vr_fatura = fields.Monetary(
string='Valor da fatura',
compute='_compute_soma_itens',
store=True,
)
vr_ibpt = fields.Monetary(
string='Valor IBPT',
compute='_compute_soma_itens',
store=True,
)
bc_inss_retido = fields.Monetary(
string='Base do INSS',
compute='_compute_soma_itens',
store=True,
)
vr_inss_retido = fields.Monetary(
string='Valor do INSS',
compute='_compute_soma_itens',
store=True,
)
vr_custo_comercial = fields.Monetary(
string='Custo comercial',
compute='_compute_soma_itens',
store=True,
)
vr_difal = fields.Monetary(
string='Valor do diferencial de alíquota ICMS próprio',
compute='_compute_soma_itens',
store=True,
)
vr_icms_estado_origem = fields.Monetary(
string='Valor do ICMS para o estado origem',
compute='_compute_soma_itens',
store=True,
)
vr_icms_estado_destino = fields.Monetary(
string='Valor do ICMS para o estado destino',
compute='_compute_soma_itens',
store=True,
)
vr_fcp = fields.Monetary(
string='Valor do fundo de combate à pobreza',
compute='_compute_soma_itens',
store=True,
)
###
# Retenções de tributos (órgãos públicos, substitutos tributários etc.)
###
# 'vr_operacao_pis_cofins_csll = CampoDinheiro(
# 'Base da retenção do PIS-COFINS e CSLL'),
# PIS e COFINS
# 'pis_cofins_retido = fields.boolean('PIS-COFINS retidos?'),
# 'al_pis_retido = CampoPorcentagem('Alíquota do PIS retido'),
# 'vr_pis_retido = CampoDinheiro('PIS retido'),
# 'al_cofins_retido = CampoPorcentagem('Alíquota da COFINS retida'),
# 'vr_cofins_retido = CampoDinheiro('COFINS retida'),
# Contribuição social sobre lucro líquido
# 'csll_retido = fields.boolean('CSLL retida?'),
# 'al_csll = CampoPorcentagem('Alíquota da CSLL'),
# 'vr_csll = CampoDinheiro('CSLL retida'),
# 'bc_csll_propria = CampoDinheiro('Base da CSLL própria'),
# 'al_csll_propria = CampoPorcentagem('Alíquota da CSLL própria'),
# 'vr_csll_propria = CampoDinheiro('CSLL própria'),
# IRRF
# 'irrf_retido = fields.boolean('IR retido?'),
# 'bc_irrf = CampoDinheiro('Base do IRRF'),
# 'al_irrf = CampoPorcentagem('Alíquota do IRRF'),
# 'vr_irrf = CampoDinheiro('Valor do IRRF'),
# 'bc_irpj_proprio = CampoDinheiro('Valor do IRPJ próprio'),
# 'al_irpj_proprio = CampoPorcentagem('Alíquota do IRPJ próprio'),
# 'vr_irpj_proprio = CampoDinheiro('Valor do IRPJ próprio'),
# ISS
# 'iss_retido = fields.boolean('ISS retido?'),
# 'bc_iss_retido = CampoDinheiro('Base do ISS'),
# 'vr_iss_retido = CampoDinheiro('Valor do ISS'),
item_ids = fields.One2many(
comodel_name='sped.documento.item',
inverse_name='documento_id',
string='Itens',
copy=True,
)
documento_referenciado_ids = fields.One2many(
comodel_name='sped.documento.referenciado',
inverse_name='documento_id',
string='Documentos Referenciados',
)
#
# Outras informações
#
eh_compra = fields.Boolean(
string='É compra?',
compute='_compute_eh_compra_venda',
)
eh_venda = fields.Boolean(
string='É venda?',
compute='_compute_eh_compra_venda',
)
eh_devolucao_compra = fields.Boolean(
string='É devolução de compra?',
compute='_compute_eh_compra_venda',
)
eh_devolucao_venda = fields.Boolean(
string='É devolução de venda?',
compute='_compute_eh_compra_venda',
)
permite_alteracao = fields.Boolean(
string='Permite alteração?',
compute='_compute_permite_alteracao',
)
permite_cancelamento = fields.Boolean(
string='Permite cancelamento?',
compute='_compute_permite_cancelamento',
)
@api.depends('emissao', 'entrada_saida', 'modelo', 'serie', 'numero',
'data_emissao', 'participante_id')
def _compute_descricao(self):
for documento in self:
txt = TIPO_EMISSAO_DICT[documento.emissao]
if documento.emissao == TIPO_EMISSAO_PROPRIA:
txt += ' - ' + ENTRADA_SAIDA_DICT[documento.entrada_saida]
txt += ' - ' + documento.modelo
txt += ' - ' + (documento.serie or '')
txt += ' - ' + formata_valor(documento.numero, casas_decimais=0)
txt += ' - ' + formata_data(documento.data_emissao)
if not documento.participante_id.cnpj_cpf:
txt += ' - Consumidor não identificado'
elif documento.participante_id.razao_social:
txt += ' - ' + documento.participante_id.razao_social
txt += ' - ' + documento.participante_id.cnpj_cpf
else:
txt += ' - ' + documento.participante_id.nome
txt += ' - ' + documento.participante_id.cnpj_cpf
documento.descricao = txt
@api.depends('modelo', 'emissao')
def _compute_permite_alteracao(self):
for documento in self:
documento.permite_alteracao = True
@api.depends('modelo', 'emissao')
def _compute_permite_cancelamento(self):
for documento in self:
documento.permite_cancelamento = True
@api.depends('data_hora_emissao', 'data_hora_entrada_saida')
def _compute_data_hora_separadas(self):
for documento in self:
data_hora_emissao = data_hora_horario_brasilia(
parse_datetime(documento.data_hora_emissao))
documento.data_emissao = str(data_hora_emissao)[:10]
documento.hora_emissao = str(data_hora_emissao)[11:19]
data_hora_entrada_saida = data_hora_horario_brasilia(
parse_datetime(documento.data_hora_entrada_saida))
documento.data_entrada_saida = str(data_hora_entrada_saida)[:10]
documento.hora_entrada_saida = str(data_hora_entrada_saida)[11:19]
@api.depends(
'item_ids.vr_nf',
'item_ids.vr_fatura',
)
def _compute_soma_itens(self):
CAMPOS_SOMA_ITENS = [
'vr_produtos', 'vr_produtos_tributacao',
'vr_frete', 'vr_seguro', 'vr_desconto', 'vr_outras',
'vr_operacao', 'vr_operacao_tributacao',
'bc_icms_proprio', 'vr_icms_proprio',
'vr_difal', 'vr_icms_estado_origem', 'vr_icms_estado_destino',
'vr_fcp',
'vr_icms_sn', 'vr_simples',
'bc_icms_st', 'vr_icms_st',
'bc_icms_st_retido', 'vr_icms_st_retido',
'bc_ipi', 'vr_ipi',
'bc_ii', 'vr_ii', 'vr_despesas_aduaneiras', 'vr_iof',
'bc_pis_proprio', 'vr_pis_proprio',
'bc_cofins_proprio', 'vr_cofins_proprio',
'bc_iss', 'vr_iss',
'vr_nf', 'vr_fatura',
'vr_ibpt',
'vr_custo_comercial'
]
for documento in self:
dados = {}
for campo in CAMPOS_SOMA_ITENS:
dados[campo] = D(0)
for item in documento.item_ids:
for campo in CAMPOS_SOMA_ITENS:
dados[campo] += getattr(item, campo, D(0))
documento.update(dados)
@api.depends('item_ids')
def _compute_eh_compra_venda(self):
for documento in self:
            # write on the record being iterated (documento), not on self
            if documento.entrada_saida == ENTRADA_SAIDA_ENTRADA:
                documento.eh_venda = False
                documento.eh_devolucao_compra = False
                for item in documento.item_ids:
                    if item.cfop_id.eh_compra:
                        documento.eh_compra = True
                        documento.eh_devolucao_venda = False
                        continue
                    elif item.cfop_id.eh_devolucao_venda:
                        documento.eh_compra = False
                        documento.eh_devolucao_venda = True
                        continue
            else:
                documento.eh_compra = False
                documento.eh_devolucao_venda = False
                for item in documento.item_ids:
                    if item.cfop_id.eh_venda:
                        documento.eh_venda = True
                        documento.eh_devolucao_compra = False
                        continue
                    elif item.cfop_id.eh_devolucao_compra:
                        documento.eh_venda = False
                        documento.eh_devolucao_compra = True
                        continue
@api.onchange('empresa_id', 'modelo', 'emissao')
def onchange_empresa_id(self):
res = {}
valores = {}
res['value'] = valores
if not self.empresa_id:
return res
if self.emissao != TIPO_EMISSAO_PROPRIA:
return res
if self.modelo not in (
MODELO_FISCAL_NFE, MODELO_FISCAL_NFCE, MODELO_FISCAL_NFSE):
return res
if self.modelo == MODELO_FISCAL_NFE:
valores['ambiente_nfe'] = self.empresa_id.ambiente_nfe
valores['tipo_emissao_nfe'] = self.empresa_id.tipo_emissao_nfe
if self.empresa_id.tipo_emissao_nfe == TIPO_EMISSAO_NFE_NORMAL:
if self.empresa_id.ambiente_nfe == AMBIENTE_NFE_PRODUCAO:
valores['serie'] = self.empresa_id.serie_nfe_producao
else:
valores['serie'] = self.empresa_id.serie_nfe_homologacao
else:
if self.empresa_id.ambiente_nfe == AMBIENTE_NFE_PRODUCAO:
valores['serie'] = (
self.empresa_id.serie_nfe_contingencia_producao
)
else:
valores['serie'] = (
self.empresa_id.serie_nfe_contingencia_homologacao
)
elif self.modelo == MODELO_FISCAL_NFCE:
valores['ambiente_nfe'] = self.empresa_id.ambiente_nfce
valores['tipo_emissao_nfe'] = self.empresa_id.tipo_emissao_nfce
if self.empresa_id.tipo_emissao_nfce == TIPO_EMISSAO_NFE_NORMAL:
if self.empresa_id.ambiente_nfce == AMBIENTE_NFE_PRODUCAO:
valores['serie'] = self.empresa_id.serie_nfce_producao
else:
valores['serie'] = self.empresa_id.serie_nfce_homologacao
else:
if self.empresa_id.ambiente_nfce == AMBIENTE_NFE_PRODUCAO:
valores['serie'] = (
self.empresa_id.serie_nfce_contingencia_producao
)
else:
valores['serie'] = (
self.empresa_id.serie_nfce_contingencia_homologacao
)
elif self.modelo == MODELO_FISCAL_NFSE:
valores['ambiente_nfe'] = self.empresa_id.ambiente_nfse
valores['tipo_emissao_nfe'] = TIPO_EMISSAO_NFE_NORMAL
if self.empresa_id.ambiente_nfse == AMBIENTE_NFE_PRODUCAO:
valores['serie_rps'] = self.empresa_id.serie_rps_producao
else:
valores['serie_rps'] = self.empresa_id.serie_rps_homologacao
return res
@api.onchange('operacao_id', 'emissao', 'natureza_operacao_id')
def onchange_operacao_id(self):
res = {}
valores = {}
res['value'] = valores
if not self.operacao_id:
return res
valores['modelo'] = self.operacao_id.modelo
valores['emissao'] = self.operacao_id.emissao
valores['entrada_saida'] = self.operacao_id.entrada_saida
if self.emissao == TIPO_EMISSAO_PROPRIA:
if self.operacao_id.natureza_operacao_id:
valores['natureza_operacao_id'] = (
self.operacao_id.natureza_operacao_id.id
)
if self.operacao_id.serie:
valores['serie'] = self.operacao_id.serie
valores['regime_tributario'] = self.operacao_id.regime_tributario
valores['ind_forma_pagamento'] = \
self.operacao_id.ind_forma_pagamento
if self.operacao_id.payment_term_id:
valores['payment_term_id'] = \
self.operacao_id.payment_term_id.id
valores['finalidade_nfe'] = self.operacao_id.finalidade_nfe
valores['modalidade_frete'] = self.operacao_id.modalidade_frete
valores['infadfisco'] = self.operacao_id.infadfisco
valores['infcomplementar'] = self.operacao_id.infcomplementar
valores['deduz_retencao'] = self.operacao_id.deduz_retencao
valores['pis_cofins_retido'] = self.operacao_id.pis_cofins_retido
valores['al_pis_retido'] = self.operacao_id.al_pis_retido
valores['al_cofins_retido'] = self.operacao_id.al_cofins_retido
valores['csll_retido'] = self.operacao_id.csll_retido
valores['al_csll'] = self.operacao_id.al_csll
valores['limite_retencao_pis_cofins_csll'] = (
self.operacao_id.limite_retencao_pis_cofins_csll
)
valores['irrf_retido'] = self.operacao_id.irrf_retido
valores['irrf_retido_ignora_limite'] = (
self.operacao_id.irrf_retido_ignora_limite
)
valores['al_irrf'] = self.operacao_id.al_irrf
valores['inss_retido'] = self.operacao_id.inss_retido
valores['consumidor_final'] = self.operacao_id.consumidor_final
valores['presenca_comprador'] = self.operacao_id.presenca_comprador
if self.operacao_id.cnae_id:
valores['cnae_id'] = self.operacao_id.cnae_id.id
valores['natureza_tributacao_nfse'] = (
self.operacao_id.natureza_tributacao_nfse
)
if self.operacao_id.servico_id:
valores['servico_id'] = self.operacao_id.servico_id.id
valores['cst_iss'] = self.operacao_id.cst_iss
return res
@api.onchange('empresa_id', 'modelo', 'emissao', 'serie', 'ambiente_nfe')
def onchange_serie(self):
res = {}
valores = {}
res['value'] = valores
if not self.empresa_id:
return res
if self.emissao != TIPO_EMISSAO_PROPRIA:
return res
if self.modelo not in (MODELO_FISCAL_NFE, MODELO_FISCAL_NFCE):
return res
ultimo_numero = self.search([
('empresa_id.cnpj_cpf', '=', self.empresa_id.cnpj_cpf),
('ambiente_nfe', '=', self.ambiente_nfe),
('emissao', '=', self.emissao),
('modelo', '=', self.modelo),
('serie', '=', self.serie.strip()),
], limit=1, order='numero desc')
valores['serie'] = self.serie.strip()
if len(ultimo_numero) == 0:
valores['numero'] = 1
else:
valores['numero'] = ultimo_numero[0].numero + 1
return res
@api.onchange('participante_id')
def onchange_participante_id(self):
res = {}
valores = {}
res['value'] = valores
#
# Quando o tipo da nota for para consumidor normal,
# mas o participante não é contribuinte, define que ele é consumidor
# final, exceto em caso de operação com estrangeiros
#
if self.consumidor_final == TIPO_CONSUMIDOR_FINAL_NORMAL:
if self.participante_id.estado != 'EX':
if self.participante_id.contribuinte == \
INDICADOR_IE_DESTINATARIO_CONTRIBUINTE:
valores['consumidor_final'] = \
TIPO_CONSUMIDOR_FINAL_CONSUMIDOR_FINAL
if self.operacao_id and self.operacao_id.preco_automatico == 'V':
if self.participante_id.transportadora_id:
valores['transportadora_id'] = \
self.participante_id.transportadora_id.id
if self.participante_id.payment_term_id:
valores['payment_term_id'] = \
self.participante_id.payment_term_id.id
return res
@api.onchange('payment_term_id', 'vr_fatura', 'vr_nf', 'data_emissao',
'duplicata_ids')
def _onchange_payment_term(self):
res = {}
valores = {}
res['value'] = valores
if not (self.payment_term_id and (self.vr_fatura or self.vr_nf) and
self.data_emissao):
return res
valor = D(self.vr_fatura or 0)
if not valor:
valor = D(self.vr_nf or 0)
#
# Para a compatibilidade com a chamada original (super), que usa
# o decorator deprecado api.one, pegamos aqui sempre o 1º elemento
# da lista que vai ser retornada
#
lista_vencimentos = self.payment_term_id.compute(valor,
self.data_emissao)[0]
duplicata_ids = [
[5, False, {}],
]
parcela = 1
for data_vencimento, valor in lista_vencimentos:
duplicata = {
'numero': str(parcela),
'data_vencimento': data_vencimento,
'valor': valor,
}
duplicata_ids.append([0, False, duplicata])
parcela += 1
valores['duplicata_ids'] = duplicata_ids
return res
def _check_permite_alteracao(self, operacao='create', dados={}):
CAMPOS_PERMITIDOS = [
'message_follower_ids',
]
for documento in self:
if documento.permite_alteracao:
continue
permite_alteracao = False
#
# Trata alguns campos que é permitido alterar depois da nota
# autorizada
#
if documento.state_nfe == SITUACAO_NFE_AUTORIZADA:
for campo in CAMPOS_PERMITIDOS:
if campo in dados:
permite_alteracao = True
break
if permite_alteracao:
continue
if operacao == 'unlink':
mensagem = \
'Não é permitido excluir este documento fiscal!'
elif operacao == 'write':
mensagem = \
'Não é permitido alterar este documento fiscal!'
elif operacao == 'create':
mensagem = \
'Não é permitido criar este documento fiscal!'
raise ValidationError(_(mensagem))
def unlink(self):
self._check_permite_alteracao(operacao='unlink')
return super(SpedDocumento, self).unlink()
def write(self, dados):
self._check_permite_alteracao(operacao='write', dados=dados)
return super(SpedDocumento, self).write(dados)
def envia_nfe(self):
pass
def cancela_nfe(self):
pass
def executa_antes_autorizar(self):
#
# Este método deve ser alterado por módulos integrados, para realizar
# tarefas de integração necessárias antes de autorizar uma NF-e
#
pass
def executa_depois_autorizar(self):
#
# Este método deve ser alterado por módulos integrados, para realizar
# tarefas de integração necessárias depois de autorizar uma NF-e,
# por exemplo, criar lançamentos financeiros, movimentações de
# estoque etc.
#
pass
def executa_antes_cancelar(self):
#
# Este método deve ser alterado por módulos integrados, para realizar
# tarefas de integração necessárias antes de autorizar uma NF-e;
# não confundir com o método _compute_permite_cancelamento, que indica
# se o botão de cancelamento vai estar disponível para o usuário na
# interface
#
pass
def executa_depois_cancelar(self):
#
# Este método deve ser alterado por módulos integrados, para realizar
# tarefas de integração necessárias depois de cancelar uma NF-e,
# por exemplo, excluir lançamentos financeiros, movimentações de
# estoque etc.
#
pass
def executa_antes_denegar(self):
#
# Este método deve ser alterado por módulos integrados, para realizar
# tarefas de integração necessárias antes de denegar uma NF-e
#
pass
def executa_depois_denegar(self):
#
# Este método deve ser alterado por módulos integrados, para realizar
# tarefas de integração necessárias depois de denegar uma NF-e,
# por exemplo, invalidar pedidos de venda e movimentações de estoque
# etc.
#
pass
def envia_email(self, mail_template):
pass
def gera_pdf(self):
pass
| agpl-3.0 | -4,344,500,498,646,205,000 | 30.356078 | 78 | 0.577578 | false |
tungvx/deploy | .google_appengine/google/appengine/ext/mapreduce/mapreduce_pipeline.py | 1 | 5809 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pipelines for mapreduce library."""
from __future__ import with_statement
import google
from appengine_pipeline.src import pipeline
from appengine_pipeline.src.pipeline import common as pipeline_common
from google.appengine.api import files
from google.appengine.ext.mapreduce import base_handler
from google.appengine.ext.mapreduce import mapper_pipeline
from google.appengine.ext.mapreduce import shuffler
MapperPipeline = mapper_pipeline.MapperPipeline
class MapPipeline(base_handler.PipelineBase):
"""Runs the map stage of MapReduce.
Iterates over input reader and outputs data into key/value format
for shuffler consumption.
Args:
job_name: mapreduce job name as string.
mapper_spec: specification of map handler function as string.
input_reader_spec: input reader specification as string.
params: mapper and input reader parameters as dict.
shards: number of shards to start as int.
Returns:
list of filenames list sharded by hash code.
"""
def run(self,
job_name,
mapper_spec,
input_reader_spec,
params,
shards=None):
yield MapperPipeline(
job_name + "-map",
mapper_spec,
input_reader_spec,
output_writer_spec=
shuffler.__name__ + "._KeyValueBlobstoreOutputWriter",
params=params,
shards=shards)
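# Hypothetical usage sketch (not part of the original module): how the stages
# defined in this file are typically composed and started. The handler names
# and reader/writer paths below are assumptions used only for illustration.
def _example_start_wordcount(blobstore_zip_key):
    mr = MapreducePipeline(
        "wordcount",
        mapper_spec="main.word_count_map",
        reducer_spec="main.word_count_reduce",
        input_reader_spec="mapreduce.input_readers.BlobstoreZipInputReader",
        output_writer_spec="mapreduce.output_writers.BlobstoreOutputWriter",
        mapper_params={"blob_key": blobstore_zip_key},
        shards=16)
    mr.start()
    return mr.pipeline_id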
class ReducePipeline(base_handler.PipelineBase):
"""Runs the reduce stage of MapReduce.
Merge-reads input files and runs reducer function on them.
Args:
job_name: mapreduce job name as string.
reader_spec: specification of reduce function.
output_writer_spec: specification of output write to use with reduce
function.
params: mapper parameters to use as dict.
filenames: list of filenames to reduce.
Returns:
filenames from output writer.
"""
def run(self,
job_name,
reducer_spec,
output_writer_spec,
params,
filenames):
new_params = dict(params or {})
new_params.update({
"files": filenames
})
yield mapper_pipeline.MapperPipeline(
job_name + "-reduce",
reducer_spec,
shuffler.__name__ + "._MergingReader",
output_writer_spec,
new_params)
class ShufflePipeline(base_handler.PipelineBase):
"""A pipeline to sort multiple key-value files.
Args:
filenames: list of file names to sort. Files have to be of records format
defined by Files API and contain serialized file_service_pb.KeyValue
protocol messages.
Returns:
The list of filenames as string. Resulting files have the same format as
input and are sorted by key.
"""
def run(self, shards):
result = []
shuffled_shards = [[] for _ in shards[0]]
for shard in shards:
for i, filename in enumerate(shard):
shuffled_shards[i].append(filename)
for filenames in shuffled_shards:
sorted_files = yield shuffler.SortPipeline(filenames)
result.append(sorted_files)
yield pipeline_common.Append(*result)
class CleanupPipeline(base_handler.PipelineBase):
"""A pipeline to do a cleanup for mapreduce jobs.
Args:
temp_files: list of lists of temporary files generated by mapreduce
job to delete.
"""
def run(self, temp_files):
for shard in temp_files:
for filename in shard:
for _ in range(10):
try:
files.delete(filename)
break
except:
pass
class MapreducePipeline(base_handler.PipelineBase):
"""Pipeline to execute MapReduce jobs.
Args:
job_name: job name as string.
mapper_spec: specification of mapper to use.
reader_spec: specification of reducer to use.
input_reader_spec: specification of input reader to read data from.
output_writer_spec: specification of output writer to save reduce output to.
mapper_params: parameters to use for mapper phase.
reducer_params: parameters to use for reduce phase.
shards: number of shards to use as int.
Returns:
filenames from output writer.
"""
def run(self,
job_name,
mapper_spec,
reducer_spec,
input_reader_spec,
output_writer_spec=None,
mapper_params=None,
reducer_params=None,
shards=None):
map_pipeline = yield MapPipeline(job_name,
mapper_spec,
input_reader_spec,
params=mapper_params,
shards=shards)
shuffler_pipeline = yield ShufflePipeline(map_pipeline)
reducer_pipeline = yield ReducePipeline(job_name,
reducer_spec,
output_writer_spec,
reducer_params,
shuffler_pipeline)
with pipeline.After(reducer_pipeline):
all_temp_files = yield pipeline_common.Extend(
map_pipeline, shuffler_pipeline)
yield CleanupPipeline(all_temp_files)
yield pipeline_common.Return(reducer_pipeline)
| apache-2.0 | -3,015,711,878,670,997,000 | 26.661905 | 80 | 0.646583 | false |
manassolanki/erpnext | erpnext/education/api.py | 1 | 12440 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import flt, cstr
from frappe.email.doctype.email_group.email_group import add_subscribers
def get_course(program):
'''Return list of courses for a particular program
:param program: Program
'''
courses = frappe.db.sql('''select course, course_name from `tabProgram Course` where parent=%s''',
(program), as_dict=1)
return courses
@frappe.whitelist()
def enroll_student(source_name):
"""Creates a Student Record and returns a Program Enrollment.
:param source_name: Student Applicant.
"""
frappe.publish_realtime('enroll_student_progress', {"progress": [1, 4]}, user=frappe.session.user)
student = get_mapped_doc("Student Applicant", source_name,
{"Student Applicant": {
"doctype": "Student",
"field_map": {
"name": "student_applicant"
}
}}, ignore_permissions=True)
student.save()
program_enrollment = frappe.new_doc("Program Enrollment")
program_enrollment.student = student.name
program_enrollment.student_name = student.title
program_enrollment.program = frappe.db.get_value("Student Applicant", source_name, "program")
frappe.publish_realtime('enroll_student_progress', {"progress": [4, 4]}, user=frappe.session.user)
return program_enrollment
@frappe.whitelist()
def check_attendance_records_exist(course_schedule=None, student_group=None, date=None):
"""Check if Attendance Records are made against the specified Course Schedule or Student Group for given date.
:param course_schedule: Course Schedule.
:param student_group: Student Group.
:param date: Date.
"""
if course_schedule:
return frappe.get_list("Student Attendance", filters={"course_schedule": course_schedule})
else:
return frappe.get_list("Student Attendance", filters={"student_group": student_group, "date": date})
@frappe.whitelist()
def mark_attendance(students_present, students_absent, course_schedule=None, student_group=None, date=None):
"""Creates Multiple Attendance Records.
:param students_present: Students Present JSON.
:param students_absent: Students Absent JSON.
:param course_schedule: Course Schedule.
:param student_group: Student Group.
:param date: Date.
"""
present = json.loads(students_present)
absent = json.loads(students_absent)
for d in present:
make_attendance_records(d["student"], d["student_name"], "Present", course_schedule, student_group, date)
for d in absent:
make_attendance_records(d["student"], d["student_name"], "Absent", course_schedule, student_group, date)
frappe.db.commit()
frappe.msgprint(_("Attendance has been marked successfully."))
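# Illustrative call sketch (not part of the original API): both student lists
# are JSON strings; the student IDs, group and date below are assumptions.
def _example_mark_attendance():
    mark_attendance(
        students_present='[{"student": "EDU-STU-0001", "student_name": "Ana"}]',
        students_absent='[{"student": "EDU-STU-0002", "student_name": "Bruno"}]',
        student_group="Grade-1-A",
        date="2018-04-02")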
def make_attendance_records(student, student_name, status, course_schedule=None, student_group=None, date=None):
"""Creates/Update Attendance Record.
:param student: Student.
:param student_name: Student Name.
:param course_schedule: Course Schedule.
:param status: Status (Present/Absent)
"""
student_attendance_list = frappe.get_list("Student Attendance", fields = ['name'], filters = {
"student": student,
"course_schedule": course_schedule,
"student_group": student_group,
"date": date
})
if student_attendance_list:
student_attendance = frappe.get_doc("Student Attendance", student_attendance_list[0])
else:
student_attendance = frappe.new_doc("Student Attendance")
student_attendance.student = student
student_attendance.student_name = student_name
student_attendance.course_schedule = course_schedule
student_attendance.student_group = student_group
student_attendance.date = date
student_attendance.status = status
student_attendance.save()
@frappe.whitelist()
def get_student_guardians(student):
"""Returns List of Guardians of a Student.
:param student: Student.
"""
guardians = frappe.get_list("Student Guardian", fields=["guardian"] ,
filters={"parent": student})
return guardians
@frappe.whitelist()
def get_student_group_students(student_group, include_inactive=0):
"""Returns List of student, student_name in Student Group.
:param student_group: Student Group.
"""
if include_inactive:
students = frappe.get_list("Student Group Student", fields=["student", "student_name"] ,
filters={"parent": student_group}, order_by= "group_roll_number")
else:
students = frappe.get_list("Student Group Student", fields=["student", "student_name"] ,
filters={"parent": student_group, "active": 1}, order_by= "group_roll_number")
return students
@frappe.whitelist()
def get_fee_structure(program, academic_term=None):
"""Returns Fee Structure.
:param program: Program.
:param academic_term: Academic Term.
"""
fee_structure = frappe.db.get_values("Fee Structure", {"program": program,
"academic_term": academic_term}, 'name', as_dict=True)
return fee_structure[0].name if fee_structure else None
@frappe.whitelist()
def get_fee_components(fee_structure):
"""Returns Fee Components.
:param fee_structure: Fee Structure.
"""
if fee_structure:
fs = frappe.get_list("Fee Component", fields=["fees_category", "amount"] , filters={"parent": fee_structure}, order_by= "idx")
return fs
@frappe.whitelist()
def get_fee_schedule(program, student_category=None):
"""Returns Fee Schedule.
:param program: Program.
:param student_category: Student Category
"""
fs = frappe.get_list("Program Fee", fields=["academic_term", "fee_structure", "due_date", "amount"] ,
filters={"parent": program, "student_category": student_category }, order_by= "idx")
return fs
@frappe.whitelist()
def collect_fees(fees, amt):
paid_amount = flt(amt) + flt(frappe.db.get_value("Fees", fees, "paid_amount"))
total_amount = flt(frappe.db.get_value("Fees", fees, "total_amount"))
frappe.db.set_value("Fees", fees, "paid_amount", paid_amount)
frappe.db.set_value("Fees", fees, "outstanding_amount", (total_amount - paid_amount))
return paid_amount
@frappe.whitelist()
def get_course_schedule_events(start, end, filters=None):
"""Returns events for Course Schedule Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Course Schedule", filters)
data = frappe.db.sql("""select name, course, color,
timestamp(schedule_date, from_time) as from_datetime,
timestamp(schedule_date, to_time) as to_datetime,
room, student_group, 0 as 'allDay'
from `tabCourse Schedule`
where ( schedule_date between %(start)s and %(end)s )
{conditions}""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
@frappe.whitelist()
def get_assessment_criteria(course):
"""Returns Assessmemt Criteria and their Weightage from Course Master.
:param Course: Course
"""
return frappe.get_list("Course Assessment Criteria", \
fields=["assessment_criteria", "weightage"], filters={"parent": course}, order_by= "idx")
@frappe.whitelist()
def get_assessment_students(assessment_plan, student_group):
student_list = get_student_group_students(student_group)
for i, student in enumerate(student_list):
result = get_result(student.student, assessment_plan)
if result:
student_result = {}
for d in result.details:
student_result.update({d.assessment_criteria: [cstr(d.score), d.grade]})
student_result.update({
"total_score": [cstr(result.total_score), result.grade],
"comment": result.comment
})
student.update({
"assessment_details": student_result,
"docstatus": result.docstatus,
"name": result.name
})
else:
student.update({'assessment_details': None})
return student_list
@frappe.whitelist()
def get_assessment_details(assessment_plan):
"""Returns Assessment Criteria and Maximum Score from Assessment Plan Master.
:param Assessment Plan: Assessment Plan
"""
return frappe.get_list("Assessment Plan Criteria", \
fields=["assessment_criteria", "maximum_score", "docstatus"], filters={"parent": assessment_plan}, order_by= "idx")
@frappe.whitelist()
def get_result(student, assessment_plan):
"""Returns Submitted Result of given student for specified Assessment Plan
:param Student: Student
:param Assessment Plan: Assessment Plan
"""
results = frappe.get_all("Assessment Result", filters={"student": student,
"assessment_plan": assessment_plan, "docstatus": ("!=", 2)})
if results:
return frappe.get_doc("Assessment Result", results[0])
else:
return None
@frappe.whitelist()
def get_grade(grading_scale, percentage):
"""Returns Grade based on the Grading Scale and Score.
:param Grading Scale: Grading Scale
:param Percentage: Score Percentage Percentage
"""
grading_scale_intervals = {}
if not hasattr(frappe.local, 'grading_scale'):
grading_scale = frappe.get_all("Grading Scale Interval", fields=["grade_code", "threshold"], filters={"parent": grading_scale})
frappe.local.grading_scale = grading_scale
for d in frappe.local.grading_scale:
grading_scale_intervals.update({d.threshold:d.grade_code})
intervals = sorted(grading_scale_intervals.keys(), key=float, reverse=True)
for interval in intervals:
if flt(percentage) >= interval:
grade = grading_scale_intervals.get(interval)
break
else:
grade = ""
return grade
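# Illustrative sketch (not part of the original API): for a grading scale whose
# intervals are, say, 75 -> "A", 50 -> "B" and 0 -> "C", the highest threshold
# not exceeding the percentage wins. The scale name below is an assumption.
def _example_grade_lookup():
    # expected to return ["A", "B", "C"] with the thresholds described above
    return [get_grade("Standard Grading Scale", pct) for pct in (80, 60, 10)]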
@frappe.whitelist()
def mark_assessment_result(assessment_plan, scores):
student_score = json.loads(scores);
assessment_details = []
for criteria in student_score.get("assessment_details"):
assessment_details.append({
"assessment_criteria": criteria,
"score": flt(student_score["assessment_details"][criteria])
})
assessment_result = get_assessment_result_doc(student_score["student"], assessment_plan)
assessment_result.update({
"student": student_score.get("student"),
"assessment_plan": assessment_plan,
"comment": student_score.get("comment"),
"total_score":student_score.get("total_score"),
"details": assessment_details
})
assessment_result.save()
details = {}
for d in assessment_result.details:
details.update({d.assessment_criteria: d.grade})
assessment_result_dict = {
"name": assessment_result.name,
"student": assessment_result.student,
"total_score": assessment_result.total_score,
"grade": assessment_result.grade,
"details": details
}
return assessment_result_dict
@frappe.whitelist()
def submit_assessment_results(assessment_plan, student_group):
total_result = 0
student_list = get_student_group_students(student_group)
for i, student in enumerate(student_list):
doc = get_result(student.student, assessment_plan)
if doc and doc.docstatus==0:
total_result += 1
doc.submit()
return total_result
def get_assessment_result_doc(student, assessment_plan):
assessment_result = frappe.get_all("Assessment Result", filters={"student": student,
"assessment_plan": assessment_plan, "docstatus": ("!=", 2)})
if assessment_result:
doc = frappe.get_doc("Assessment Result", assessment_result[0])
if doc.docstatus == 0:
return doc
elif doc.docstatus == 1:
frappe.msgprint(_("Result already Submitted"))
return None
else:
return frappe.new_doc("Assessment Result")
@frappe.whitelist()
def update_email_group(doctype, name):
if not frappe.db.exists("Email Group", name):
email_group = frappe.new_doc("Email Group")
email_group.title = name
email_group.save()
email_list = []
students = []
if doctype == "Student Group":
students = get_student_group_students(name)
for stud in students:
for guard in get_student_guardians(stud.student):
email = frappe.db.get_value("Guardian", guard.guardian, "email_address")
if email:
email_list.append(email)
add_subscribers(name, email_list)
@frappe.whitelist()
def get_current_enrollment(student, academic_year=None):
current_academic_year = academic_year or frappe.defaults.get_defaults().academic_year
program_enrollment_list = frappe.db.sql('''
select
name as program_enrollment, student_name, program, student_batch_name as student_batch,
student_category, academic_term, academic_year
from
`tabProgram Enrollment`
where
student = %s and academic_year = %s
order by creation''', (student, current_academic_year), as_dict=1)
if program_enrollment_list:
return program_enrollment_list[0]
else:
return None
| gpl-3.0 | 7,010,205,001,467,477,000 | 31.997347 | 129 | 0.727251 | false |
Strepto/sumatrapdf | scripts/obsolete/buildbot-obsolete.py | 1 | 22375 | """
Builds sumatra and uploads results to s3 for easy analysis, viewable at:
https://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/index.html
"""
import sys
import os
# assumes it is being run as ./scripts/buildbot.py
efi_scripts_dir = os.path.join("tools", "efi")
sys.path.append(efi_scripts_dir)
import shutil
import time
import datetime
import cPickle
import traceback
import s3
import util
import util2
import efiparse
import build
from util import file_remove_try_hard, run_cmd_throw, pretty_print_secs
from util import Serializable, create_dir
from util import load_config, run_cmd, strip_empty_lines
from util import verify_path_exists, verify_started_in_right_directory
from buildbot_html import gen_analyze_html, build_index_html, rebuild_trans_src_path_cache
from buildbot_html import build_sizes_json, g_first_analyze_build
import runtests
"""
TODO:
- diff for symbols in html format
- upload efi html diff as part of buildbot
MAYBE:
- aggressive optimization causes symbol churn, which makes reading efi output
hard. One option would be to run efi on an executable compiled with less
aggressive optimization. Another would be to post-process the result
and use heuristics to suppress bogus changes
"""
class Stats(Serializable):
fields = {
"analyze_sumatra_warnings_count": 0,
"analyze_mupdf_warnings_count": 0,
"analyze_ext_warnings_count": 0,
"rel_sumatrapdf_exe_size": 0,
"rel_sumatrapdf_no_mupdf_exe_size": 0,
"rel_installer_exe_size": 0,
"rel_libmupdf_dll_size": 0,
"rel_nppdfviewer_dll_size": 0,
"rel_pdffilter_dll_size": 0,
"rel_pdfpreview_dll_size": 0,
"rel_failed": False,
"rel_build_log": "",
"analyze_out": "",
}
fields_no_serialize = ["rel_build_log", "analyze_out"]
def __init__(self, read_from_file=None):
Serializable.__init__(self, Stats.fields,
Stats.fields_no_serialize, read_from_file)
def file_size(p):
return os.path.getsize(p)
def str2bool(s):
if s.lower() in ("true", "1"):
return True
if s.lower() in ("false", "0"):
return False
assert(False)
TIME_BETWEEN_PRE_RELEASE_BUILDS_IN_SECS = 60 * 60 * 8 # 8hrs
g_time_of_last_build = None
g_cache_dir = create_dir(
os.path.realpath(os.path.join("..", "sumatrapdfcache", "buildbot")))
g_stats_cache_dir = create_dir(os.path.join(g_cache_dir, "stats"))
g_logs_cache_dir = create_dir(os.path.join(g_cache_dir, "logs"))
def get_cache_dir():
return g_cache_dir
def get_stats_cache_dir():
return g_stats_cache_dir
def get_logs_cache_dir():
return g_logs_cache_dir
@util2.memoize
def cert_path():
scripts_dir = os.path.realpath(os.path.dirname(__file__))
cert_path = os.path.join(scripts_dir, "cert.pfx")
return verify_path_exists(cert_path)
def logs_efi_out_path(ver):
return os.path.join(get_logs_cache_dir(), str(ver) + "_efi.txt.bz2")
# logs are only kept for potential troubleshooting and they're quite big,
# so we delete old files (we keep logs for the last $to_keep revisions)
def delete_old_logs(to_keep=10):
files = os.listdir(get_logs_cache_dir())
versions = []
for f in files:
ver = int(f.split("_")[0])
if ver not in versions:
versions.append(ver)
versions.sort(reverse=True)
if len(versions) <= to_keep:
return
to_delete = versions[to_keep:]
for f in files:
ver = int(f.split("_")[0])
if ver in to_delete:
p = os.path.join(get_logs_cache_dir(), f)
os.remove(p)
# return Stats object or None if we don't have it for this version
def stats_for_ver(ver):
local_path = os.path.join(get_stats_cache_dir(), ver + ".txt")
if not os.path.exists(local_path):
s3_path = "sumatrapdf/buildbot/%s/stats.txt" % ver
if not s3.exists(s3_path):
return None
s3.download_to_file(s3_path, local_path)
assert(os.path.exists(local_path))
return Stats(local_path)
def previous_successful_build_ver(ver):
ver = int(ver) - 1
while True:
stats = stats_for_ver(str(ver))
if None == stats:
return 0
if not stats.rel_failed:
return ver
ver -= 1
# We cache results of running svn log in a dict mapping
# version to string returned by svn log
g_svn_log_per_ver = None
def load_svn_log_data():
try:
path = os.path.join(get_cache_dir(), "snv_log.dat")
fo = open(path, "rb")
except IOError:
# it's ok if doesn't exist
return {}
try:
res = cPickle.load(fo)
fo.close()
return res
except:
fo.close()
file_remove_try_hard(path)
return {}
def save_svn_log_data(data):
p = os.path.join(get_cache_dir(), "snv_log.dat")
fo = open(p, "wb")
cPickle.dump(data, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
def checkin_comment_for_ver(ver):
global g_svn_log_per_ver
raise BaseException("NYI for git")
ver = str(ver)
if g_svn_log_per_ver is None:
g_svn_log_per_ver = load_svn_log_data()
if ver not in g_svn_log_per_ver:
# TODO: retry few times to make it robust against temporary network
# failures
(out, err) = run_cmd_throw("svn", "log", "-r%s" % ver, "-v")
g_svn_log_per_ver[ver] = out
save_svn_log_data(g_svn_log_per_ver)
s = g_svn_log_per_ver[ver]
res = parse_svnlog_out(s)
if res is None:
return "not a source code change"
return res[1]
# return true if we already have results for a given build number in s3
def has_already_been_built(ver):
s3_dir = "sumatrapdf/buildbot/"
n1 = s3_dir + ver + "/analyze.html"
n2 = s3_dir + ver + "/release_build_log.txt"
keys = s3.list(s3_dir)
for k in keys:
if k.name in [n1, n2]:
return True
return False
def verify_efi_present():
try:
(out, err, errcode) = util.run_cmd("efi.exe")
except:
print("Must have efi.exe in the %PATH%!!!")
sys.exit(1)
if "Usage:" not in out:
print("efi.exe created unexpected output:\n%s" % out)
sys.exit(1)
def file_size_in_obj(file_name, defSize=None):
p = os.path.join("obj-rel", file_name)
if not os.path.exists(p) and defSize is not None:
return defSize
return file_size(p)
def clean_release():
shutil.rmtree("obj-rel", ignore_errors=True)
shutil.rmtree("vs-premake", ignore_errors=True)
shutil.rmtree(os.path.join("mupdf", "generated"), ignore_errors=True)
def build_release(stats, ver):
config = "CFG=rel"
obj_dir = "obj-rel"
extcflags = "EXTCFLAGS=-DSVN_PRE_RELEASE_VER=%s" % ver
platform = "PLATFORM=X86"
clean_release()
(out, err, errcode) = run_cmd("nmake", "-f", "makefile.msvc",
config, extcflags, platform,
"all_sumatrapdf")
log_path = os.path.join(get_logs_cache_dir(), ver + "_rel_log.txt")
build_log = out + "\n====STDERR:\n" + err
build_log = strip_empty_lines(build_log)
open(log_path, "w").write(build_log)
stats.rel_build_log = ""
stats.rel_failed = False
if errcode != 0:
stats.rel_build_log = build_log
stats.rel_failed = True
return
stats.rel_sumatrapdf_exe_size = file_size_in_obj("SumatraPDF.exe")
stats.rel_sumatrapdf_no_mupdf_exe_size = file_size_in_obj(
"SumatraPDF-no-MuPDF.exe")
stats.rel_libmupdf_dll_size = file_size_in_obj("libmupdf.dll")
stats.rel_nppdfviewer_dll_size = file_size_in_obj("npPdfViewer.dll", 0)
stats.rel_pdffilter_dll_size = file_size_in_obj("PdfFilter.dll")
stats.rel_pdfpreview_dll_size = file_size_in_obj("PdfPreview.dll")
stats.rel_installer_exe_size = file_size_in_obj("Installer.exe")
def build_analyze(stats, ver):
config = "CFG=rel"
obj_dir = "obj-rel"
extcflags = "EXTCFLAGS=-DSVN_PRE_RELEASE_VER=%s" % ver
platform = "PLATFORM=X86"
shutil.rmtree(obj_dir, ignore_errors=True)
shutil.rmtree(os.path.join("mupdf", "generated"), ignore_errors=True)
(out, err, errcode) = run_cmd("nmake", "-f", "makefile.msvc",
"WITH_ANALYZE=yes", config, extcflags, platform, "all_sumatrapdf")
stats.analyze_out = out
log_path = os.path.join(get_logs_cache_dir(), ver + "_analyze_log.txt")
s = out + "\n====STDERR:\n" + err
open(log_path, "w").write(strip_empty_lines(s))
def svn_update_to_ver(ver):
run_cmd_throw("svn", "update", "-r" + ver)
rebuild_trans_src_path_cache()
# runs efi.exe on obj-rel/SumatraPDF.exe, stores the data in obj-rel/efi.txt.bz2
# and uploads to s3 as efi.txt.bz2
def build_and_upload_efi_out(ver):
obj_dir = "obj-rel"
s3dir = "sumatrapdf/buildbot/%s/" % ver
os.chdir(obj_dir)
util.run_cmd_throw("efi", "SumatraPDF.exe", ">efi.txt")
util.bz_file_compress("efi.txt", "efi.txt.bz2")
s3.upload_file_public("efi.txt.bz2", s3dir + "efi.txt.bz2", silent=True)
shutil.copyfile("efi.txt.bz2", logs_efi_out_path(ver))
os.chdir("..")
def get_efi_out(ver):
ver = str(ver)
p = logs_efi_out_path(ver)
if os.path.exists(p):
return p
# TODO: try downloading from s3 if it doesn't exist? For now we rely on the
# fact that it was built on this machine, so the results should still be in
# the logs cache
return None
def efi_diff_as_txt(diff, max=-1):
lines = []
diff.added.sort(key=lambda sym: sym.size, reverse=True)
diff.removed.sort(key=lambda sym: sym.size, reverse=True)
diff.changed.sort(key=lambda sym: sym.size_diff, reverse=True)
added = diff.added
if len(added) > 0:
lines.append("\nAdded symbols:")
if max != -1:
added = added[:max]
for sym in added:
#sym = diff.syms2.name_to_sym[sym_name]
size = sym.size
s = "%4d : %s" % (size, sym.full_name())
lines.append(s)
removed = diff.removed
if len(removed) > 0:
lines.append("\nRemoved symbols:")
if max != -1:
removed = removed[:max]
for sym in removed:
#sym = diff.syms2.name_to_sym[sym_name]
size = sym.size
s = "%4d : %s" % (size, sym.full_name())
lines.append(s)
changed = diff.changed
if len(changed) > 0:
lines.append("\nChanged symbols:")
if max != -1:
changed = changed[:max]
for sym in changed:
size = sym.size_diff
lines.append("%4d : %s" % (size, sym.full_name()))
return "\n".join(lines)
# builds the efi diff between this version and the previous successful version
# and uploads as efi_diff.txt
def build_and_upload_efi_txt_diff(ver):
prev_ver = previous_successful_build_ver(ver)
if 0 == prev_ver:
return
efi_path_curr = get_efi_out(ver)
if not efi_path_curr:
print("didn't find efi output for %s" % str(ver))
return
efi_path_prev = get_efi_out(prev_ver)
if not efi_path_prev:
print("didn't find efi output for %s" % str(prev_ver))
return
obj_file_splitters = ["obj-rel\\", "INTEL\\"]
efi1 = efiparse.parse_file(efi_path_prev, obj_file_splitters)
efi2 = efiparse.parse_file(efi_path_curr, obj_file_splitters)
diff = efiparse.diff(efi1, efi2)
s = str(diff)
s = s + "\n" + efi_diff_as_txt(diff)
s = ""
s3dir = "sumatrapdf/buildbot/%s/" % str(ver)
s3.upload_data_public_with_content_type(
s, s3dir + "efi_diff.txt", silent=True)
# TODO: maybe add debug build and 64bit release?
# skip_release is just for testing
def build_version(ver, skip_release=False):
print("Building version %s" % ver)
clean_release()
# a hack: checkin_comment_for_ver() might call svn log, which doesn't like
# unversioned directories (like obj-rel or vs-premake), so we call it here,
# after clean, to cache the result
checkin_comment_for_ver(ver)
svn_update_to_ver(ver)
s3dir = "sumatrapdf/buildbot/%s/" % ver
stats = Stats()
# only run /analyze on newer builds since we didn't have the necessary
# makefile logic before
run_analyze = int(ver) >= g_first_analyze_build
if not skip_release:
start_time = datetime.datetime.now()
build_release(stats, ver)
dur = datetime.datetime.now() - start_time
print("%s for release build" % str(dur))
if stats.rel_failed:
# don't bother running analyze if release failed
run_analyze = False
s3.upload_data_public_with_content_type(
stats.rel_build_log, s3dir + "release_build_log.txt", silent=True)
if not stats.rel_failed:
build_and_upload_efi_out(ver)
if run_analyze:
start_time = datetime.datetime.now()
build_analyze(stats, ver)
dur = datetime.datetime.now() - start_time
print("%s for analyze build" % str(dur))
html = gen_analyze_html(stats, ver)
p = os.path.join(get_logs_cache_dir(), "%s_analyze.html" % str(ver))
open(p, "w").write(html)
s3.upload_data_public_with_content_type(
html, s3dir + "analyze.html", silent=True)
if not stats.rel_failed:
build_and_upload_efi_txt_diff(ver)
# TODO: it appears we might throw an exception after uploading analyze.html but
# before/during uploading stats.txt. Would have to implement transactional
# multi-upload to be robust against that, so will just let it be
stats_txt = stats.to_s()
s3.upload_data_public_with_content_type(
stats_txt, s3dir + "stats.txt", silent=True)
html = build_index_html(stats_for_ver, checkin_comment_for_ver)
s3.upload_data_public_with_content_type(
html, "sumatrapdf/buildbot/index.html", silent=True)
json_s = build_sizes_json(get_stats_cache_dir, stats_for_ver)
s3.upload_data_public_with_content_type(
json_s, "sumatrapdf/buildbot/sizes.js", silent=True)
if stats.rel_failed:
email_build_failed(ver)
return # don't run tests if build fails
# TODO: can't run tests anymore because premake4 only generates
# vs 2010 solution, which can't be executed by vs 2013
#err = runtests.run_tests()
err = None
if err != None:
s3.upload_data_public_with_content_type(
err, s3dir + "tests_error.txt", silent=True)
email_tests_failed(ver, err)
print("Tests failed. Error message:\n" + err)
else:
print("Tests passed!")
def test_build_html_index():
print("test_build_html_index()")
html = build_index_html(stats_for_ver, checkin_comment_for_ver)
print("after build_index_html()")
import codecs
codecs.open("index.html", "w", "utf8").write(html)
print("after write")
sys.exit(1)
g_email_to = ["[email protected]", "[email protected]"]
def email_tests_failed(ver, err):
s3_url_start = "https://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/"
c = load_config()
if not c.HasNotifierEmail():
print("email_tests_failed() not ran because not c.HasNotifierEmail()")
return
sender, senderpwd = c.GetNotifierEmailAndPwdMustExist()
subject = "SumatraPDF tests failed for build %s" % str(ver)
checkin_url = "https://code.google.com/p/sumatrapdf/source/detail?r=%s" % str(ver)
body = "Checkin: %s\n\n" % checkin_url
log_url = s3_url_start + str(ver) + "/tests_error.txt"
body += "Build log: %s\n\n" % log_url
buildbot_index_url = s3_url_start + "index.html"
body += "Buildbot: %s\n\n" % buildbot_index_url
body += "Error: %s\n\n" % err
util.sendmail(sender, senderpwd, g_email_to, subject, body)
def email_msg(msg):
c = load_config()
if not c.HasNotifierEmail():
print("email_build_failed() not ran because not c.HasNotifierEmail()")
return
sender, senderpwd = c.GetNotifierEmailAndPwdMustExist()
subject = "SumatraPDF buildbot failed"
util.sendmail(sender, senderpwd, ["[email protected]"], subject, msg)
def email_build_failed(ver):
s3_url_start = "https://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/"
c = load_config()
if not c.HasNotifierEmail():
print("email_build_failed() not ran because not c.HasNotifierEmail()")
return
sender, senderpwd = c.GetNotifierEmailAndPwdMustExist()
subject = "SumatraPDF build %s failed" % str(ver)
checkin_url = "https://code.google.com/p/sumatrapdf/source/detail?r=%s" % str(ver)
body = "Checkin: %s\n\n" % checkin_url
build_log_url = s3_url_start + str(ver) + "/release_build_log.txt"
body += "Build log: %s\n\n" % build_log_url
buildbot_index_url = s3_url_start + "index.html"
body += "Buildbot: %s\n\n" % buildbot_index_url
util.sendmail(sender, senderpwd, g_email_to, subject, body)
# for testing
def build_curr(force=False):
raise BaseException("NYI for git")
(local_ver, latest_ver) = util.get_svn_versions()
print("local ver: %s, latest ver: %s" % (local_ver, latest_ver))
if not has_already_been_built(local_ver) or force:
build_version(local_ver)
else:
print("We have already built revision %s" % local_ver)
def build_version_retry(ver, try_count=2):
# it can happen that we get a valid but intermittent exception, e.g.
# due to the svn command failing because of a server hiccup;
# in that case we'll retry, waiting 1 min in between,
# but only up to try_count times
while True:
try:
build_version(ver)
except Exception, e:
# rethrow assert() exceptions, they come from our code
# and we should stop
if isinstance(e, AssertionError):
print("assert happened:")
print(str(e))
traceback.print_exc()
raise e
print(str(e))
traceback.print_exc()
try_count -= 1
if 0 == try_count:
raise
time.sleep(60)
return
def buildbot_loop():
global g_time_of_last_build
while True:
# util.get_svn_versions() might throw an exception due to
# temporary network problems, so retry
try:
(local_ver, latest_ver) = util.get_svn_versions()
except:
print("get_svn_versions() threw an exception")
time.sleep(120)
continue
print("local ver: %s, latest ver: %s" % (local_ver, latest_ver))
revs_built = 0
while int(local_ver) <= int(latest_ver):
if not has_already_been_built(local_ver):
build_version_retry(local_ver)
revs_built += 1
else:
print("We have already built revision %s" % local_ver)
local_ver = str(int(local_ver) + 1)
delete_old_logs()
# don't sleep if we built something in this cycle. a new checkin might
# have happened while we were working
if revs_built > 0:
g_time_of_last_build = datetime.datetime.now()
continue
secs_until_prerelease = None
if g_time_of_last_build is not None:
td = datetime.datetime.now() - g_time_of_last_build
secs_until_prerelease = TIME_BETWEEN_PRE_RELEASE_BUILDS_IN_SECS - \
int(td.total_seconds())
if secs_until_prerelease < 0:
build_pre_release()
g_time_of_last_build = None
if secs_until_prerelease is None:
print("Sleeping for 15 minutes to wait for new checkin")
else:
print("Sleeping for 15 minutes, %s until pre-release" %
pretty_print_secs(secs_until_prerelease))
time.sleep(60 * 15) # 15 mins
def ignore_pre_release_build_error(s):
# it's possible we did a pre-release build outside of buildbot and that
# shouldn't be a fatal error
if "already exists in s3" in s:
return True
return False
def build_pre_release():
try:
cert_dst_path = os.path.join("scripts", "cert.pfx")
if not os.path.exists(cert_dst_path):
shutil.copyfile(cert_path(), cert_dst_path)
print("Building pre-release")
build.build_pre_release()
except BaseException, e:
s = str(e)
print(s)
# a bit of a hack. not every kind of failure should stop the buildbot
if not ignore_pre_release_build_error(s):
traceback.print_exc()
raise
def test_email_tests_failed():
email_tests_failed("200", "hello")
sys.exit(1)
def verify_can_send_email():
c = load_config()
if not c.HasNotifierEmail():
print("can't run. scripts/config.py missing notifier_email and/or notifier_email_pwd")
sys.exit(1)
def main():
verify_can_send_email()
cert_path() # early check and ensures value is memoized
verify_efi_present()
verify_started_in_right_directory()
# to avoid problems, we build a separate source tree, just for the buildbot
src_path = os.path.join("..", "sumatrapdf_buildbot")
verify_path_exists(src_path)
conf = load_config()
s3.set_secrets(conf.aws_access, conf.aws_secret)
s3.set_bucket("kjkpub")
os.chdir(src_path)
# test_email_tests_failed()
#build_version("8190", skip_release=True)
# test_build_html_index()
# build_sizes_json()
# build_curr(force=True)
# TODO: add a try/catch and e-mail if failed for unexpected reasons
buildbot_loop()
if __name__ == "__main__":
try:
main()
except Exception, e:
msg = "buildbot failed\nException: " + str(e) + "\n"
email_msg(msg)
| gpl-3.0 | -7,628,664,021,878,116,000 | 32.056317 | 100 | 0.59857 | false |
google-research/meta-dataset | meta_dataset/analyze.py | 1 | 27009 | # coding=utf-8
# Copyright 2021 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
r"""Script for aggregating the eval summaries into dicts.
This script assumes that the evaluation has already been ran (and has produced
the eval summaries from which this script reads).
Creates four dicts: One maps each observed 'shot' to a list of class precisions
obtained by classes that had that shot (regardless of shots of other classes in
the same episode). The second maps each observed 'way' of an episode to a list
of accuracies of the episodes with that way. The third maps each observed height
(of the lowest common ancestor of pairs of leaves corresponding to the Synsets
of ImageNet binary classification tasks from the training subgraph) to the
accuracy of those tasks, aiming to study how the fine- or coarse- grainedness of
a task affects its difficulty. The fourth maps each observed degree of imbalance
(w.r.t the numbers of shots of the different classes in the task) to the
accuracy of the corresponding episodes.
Summarized versions are also created that keep only the mean and confidence
intervals instead of the list of all precisions or accuracies, resp., as the
values of these dicts.
Sample command:
# pylint: disable=line-too-long
python -m meta_dataset.analyze \
--alsologtostderr \
--eval_finegrainedness \
--eval_finegrainedness_split=test \
--root_dir=<root_dir> \
# pylint: enable=line-too-long
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import logging
from meta_dataset.data import dataset_spec
from meta_dataset.data import imagenet_specification as imagenet_spec
from meta_dataset.data import learning_spec
import numpy as np
from six.moves import range
import six.moves.cPickle as pkl
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
# Will search for all event files in this root dir.
tf.flags.DEFINE_string(
'root_dir',
'',
'The root '
'directory to look in for sub-directory trees containing event files.')
tf.flags.DEFINE_bool(
'eval_imbalance', False, 'Whether to perform only 2-way evaluation for '
'assessing performance as a function of how imbalanced each task is.')
tf.flags.DEFINE_bool(
'eval_finegrainedness', False, 'Whether to perform only 2-way ImageNet '
'evaluation for assessing performance as a function of how finegrained '
'each task is. This differs from usual ImageNet eval in the sampling '
'procedure used to get episodes, and therefore requires its own setting.')
tf.flags.DEFINE_enum(
'eval_finegrainedness_split', 'test', ['train', 'valid', 'test'], 'The '
'split whose results we want to use for the fine-grainedness analysis. '
'Contrary to most analyses which are performed on the test split only, the '
'fine-grainedness analysis may also be performed on the train or valid '
'sub-graphs of ImageNet too, since the test sub-graph evidently does not '
'exhibit enough variation in the fine-grainedness of its different tasks '
'to allow for a meaningful analysis.')
# To restrict to evaluating on ImageNet, the following should be set to '2'.
# The valid sub-experiment id's start from '1'.
# TODO(etriantafillou): Adapt the following for external users. In particular,
# we shouldn't necessarily assume the directory structure where there is one
# directory per experiment id, which corresponds to different hyperparams.
tf.flags.DEFINE_enum(
'restrict_to_subexperiment', '0', [str(num) for num in range(11)], 'If '
'positive, restricts to using the summaries in the sub-experiment whose id '
'is the given number. This corresponds to a specific hyper (e.g. choice of '
'evaluation dataset). Valid experiment ids start from "1".')
tf.flags.DEFINE_bool(
'force_recompute', False, 'Whether to always re-compute (and overwrite) '
'the dictionaries regardless of whether they have already been computed.')
tf.flags.DEFINE_string('records_root_dir', '',
'Root directory containing a subdirectory per dataset.')
FLAGS = tf.flags.FLAGS
def compute_class_precision(class_id, logits, targets):
"""Computes the precision for class_id.
The precision for a class is defined as the number of examples of that class
that are correctly classified over its total number of examples.
Args:
class_id: An int, in the range between 0 and the number of classes.
logits: A float array, of shape [num_test_examples, num_classes].
targets: An int array, of the same shape as logits.
Returns:
precision: A float. The precision for the given class.
"""
# Get the section of the logits that correspond to class_id.
class_logits_ids = np.where(targets == class_id)[0]
# [# test examples of class_id, way].
class_logits = logits[class_logits_ids]
# [# test examples of class_id]
class_preds = np.argmax(class_logits, axis=1)
precision = np.mean(np.equal(class_preds, class_id))
return precision
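# Worked example with made-up values: for logits
# [[2.0, 0.1], [0.2, 1.5], [3.0, 0.5]] and targets [0, 0, 1], class 0 has its
# test examples in rows 0 and 1, argmax over those rows predicts [0, 1], so
# compute_class_precision(0, logits, targets) returns 0.5.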
def compute_episode_accuracy(logits, targets):
"""Computes the accuracy for the episode.
The accuracy for the episode is the proportion of correctly classified test
examples from the overall number of test examples.
Args:
logits: A float array, of shape [num_test_examples, num_classes].
targets: An int array, of the same shape as logits.
Returns:
accuracy: A float. The precision for the given class.
"""
preds = np.argmax(logits, axis=1)
return np.mean(np.equal(preds, targets))
def get_shot_to_precision(shots, logits, targets):
"""Performance of a particular class as a function of its 'shot'.
Args:
shots: A list containing a np.array per episode. The shape of an episode's
array is the [way]. Stores the 'shot' of each class, ie. the number of
training examples that that class has in the support set.
logits: A list containing a np.array per episode. The shape of an episode's
array is [size of test set, way].
targets: A list containing a np.array per episode. The shape of an episode's
array is [size of test set]. This contains integers in the range from 0 to
the way of the episode.
Returns:
shot_to_precision: A dict that maps each 'observed' shot to a list that
stores the precision obtained for that shot (each entry in this list is
the precision of a particular class that had this shot, regardless of the
shots of the other classes in the episode.).
"""
shot_to_precision = collections.defaultdict(list)
for episode_num, episode_shots in enumerate(shots):
episode_logits = logits[episode_num]
episode_targets = targets[episode_num]
for class_id, class_shot in enumerate(episode_shots):
class_precision = compute_class_precision(class_id, episode_logits,
episode_targets)
shot_to_precision[class_shot].append(class_precision)
return shot_to_precision
def get_imbalance_to_accuracy(class_props, logits, targets):
"""Accuracy as a function of imabalance.
Args:
class_props: A list containing a np.array per episode. The shape of an
episode's array is the [way]. Stores the 'normalized shot' of each class,
ie. the proportion of the examples of that class that are in the support
set of the episode.
logits: A list containing a np.array per episode. The shape of an episode's
array is [size of test set, way].
targets: A list containing a np.array per episode. The shape of an episode's
array is [size of test set]. This contains integers in the range from 0 to
the way of the episode.
Returns:
imbalance_to_accuracy: A dict mapping each observed imbalance (a float) to a
list that stores the accuracy of the episodes characterized by that degree
of imbalance.
Raises:
ValueError: There should have been exactly 2 elements in the list of each
episode's class id's.
"""
imbalance_to_accuracy = collections.defaultdict(list)
for episode_num, episode_class_props in enumerate(class_props):
if len(episode_class_props) != 2:
raise ValueError(
'There should have been exactly 2 elements in the list '
"of each episode's class_props (we only perform the "
'imbalance analysis on binary tasks). Instead, found: {}'.format(
len(episode_class_props)))
# Compute imbalance.
imbalance = max(episode_class_props) - min(episode_class_props)
# Compute the accuracy of the episode.
episode_logits = logits[episode_num]
episode_targets = targets[episode_num]
episode_acc = compute_episode_accuracy(episode_logits, episode_targets)
imbalance_to_accuracy[imbalance].append(episode_acc)
return imbalance_to_accuracy
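# Illustration with made-up numbers: an episode whose class_props are
# [0.7, 0.3] has imbalance 0.7 - 0.3 = 0.4, so its accuracy is appended to
# imbalance_to_accuracy[0.4]; a perfectly balanced episode ([0.5, 0.5]) is
# recorded under the key 0.0.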
def get_way_to_accuracy(ways, logits, targets):
"""Accuracy as a function of the episode's way.
Args:
ways: A list containing the 'way' of each episode.
logits: A list containing a np.array per episode. The shape of an episode's
array is [size of test set, way].
targets: A list containing a np.array per episode. The shape of an episode's
array is [size of test set]. This contains integers in the range from 0 to
the way of the episode.
Returns:
way_to_accuracy: A dict that maps each 'observed' way to a list that
stores the accuracy obtained for different episodes of that way.
"""
way_to_accuracy = collections.defaultdict(list)
for episode_num, episode_way in enumerate(ways):
episode_logits = logits[episode_num]
assert episode_way == episode_logits.shape[1]
episode_targets = targets[episode_num]
episode_acc = compute_episode_accuracy(episode_logits, episode_targets)
way_to_accuracy[episode_way].append(episode_acc)
return way_to_accuracy
def get_finegrainedness_split_enum():
"""Returns the Split corresponding to FLAGS.eval_finegrainedness_split."""
if FLAGS.eval_finegrainedness_split == 'train':
split_enum = learning_spec.Split.TRAIN
elif FLAGS.eval_finegrainedness_split == 'valid':
split_enum = learning_spec.Split.VALID
elif FLAGS.eval_finegrainedness_split == 'test':
split_enum = learning_spec.Split.TEST
return split_enum
def get_synsets_from_class_ids(class_ids):
"""Returns the Synsets of the appropriate subgraph corresponding to class_ids.
For each class id in class_ids, the corresponding Synset is found among the
Synsets of the subgraph corresponding to the split that is chosen for the
fine-grainedness analysis.
Args:
class_ids: A np.array of ints in the range between 1 and the total number of
classes that contains the two class id's chosen for an episode.
Returns:
A list of Synsets.
Raises:
ValueError: The dataset specification is not found in the expected location.
"""
# First load the DatasetSpecification of ImageNet.
dataset_records_path = os.path.join(FLAGS.records_root_dir, 'ilsvrc_2012')
imagenet_data_spec = dataset_spec.load_dataset_spec(dataset_records_path)
# A set of Synsets of the split's subgraph.
split_enum = get_finegrainedness_split_enum()
split_subgraph = imagenet_data_spec.split_subgraphs[split_enum]
# Go from class_ids (integers in the range from 1 to the total number of
# classes in the Split) to WordNet id's, e.g n02075296.
wn_ids = []
for class_id in class_ids:
wn_ids.append(imagenet_data_spec.class_names[class_id])
# Find the Synsets in split_subgraph whose WordNet id's are wn_ids.
synsets = imagenet_spec.get_synsets_from_ids(wn_ids, split_subgraph)
return [synsets[wn_id] for wn_id in wn_ids]
def get_height_to_accuracy(class_ids, logits, targets):
"""Accuracy as a function of the height of class' the lowest common ancestor.
This is only applicable to 2-way ImageNet episodes. Given the class set of
each episode, we find the corresponding 2 leaves of the ImageNet graph and
compute the lowest common ancestor of those leaves. Its height is computed as
the maximum over the length of the paths from that node to each of the two
leaves. This height is the estimate of fine-grainedness. Intuitively, the
larger the height, the more coarse-grained the episode's classification task.
Args:
class_ids: A list containing a np.array per episode that contains the two
class id's chosen for the episode's binary classification task. These id's
are ints in the range between 1 and the total number of classes.
logits: A list containing a np.array per episode. The shape of an episode's
array is [size of test set, way].
targets: A list containing a np.array per episode. The shape of an episode's
array is [size of test set]. This contains integers in the range from 0 to
the way of the episode.
Returns:
height_to_accuracy: A dict that maps each 'observed' height to a list that
contains the accuracies obtained for different episodes with that height.
Raises:
ValueError: There should have been exactly 2 elements in the list of each
episode's class id's.
"""
height_to_accuracy = collections.defaultdict(list)
for episode_num, episode_class_ids in enumerate(class_ids):
if len(episode_class_ids) != 2:
raise ValueError('There should have been exactly 2 elements in the list '
"of each episode's class id's.")
# Get the Synsets corresponding to the class id's episode_class_ids.
episode_synsets = get_synsets_from_class_ids(episode_class_ids)
assert len(episode_synsets) == 2, ('Fine- vs coarse- grained analysis '
'should be restricted to binary tasks.')
# Compute the height of the lowest common ancestor of the episode's Synsets.
_, height = imagenet_spec.get_lowest_common_ancestor(
episode_synsets[0], episode_synsets[1])
# Compute the accuracy of the episode.
episode_logits = logits[episode_num]
episode_targets = targets[episode_num]
episode_acc = compute_episode_accuracy(episode_logits, episode_targets)
height_to_accuracy[height].append(episode_acc)
return height_to_accuracy
def summarize_values_stats(d):
"""Summarizes each list value of dict d into a mean and confidence interval.
The summarized version of an empty dictionary, is also empty.
Args:
d: A dict where each value is a list.
Returns:
d_mean_ci: If d is not empty, a dict with the same keys as d but with each
value which was originally a list replaced with a tuple of the mean of
that list and the corresponding confidence interval.
Raises:
ValueError: The values of d are not lists.
"""
if not d:
return {}
for v in d.values():
if not isinstance(v, list):
raise ValueError('The values of the provided dict are not lists.')
d_mean_ci = {}
for k, v in d.items():
mean = np.mean(v)
ci = np.std(v) * 1.96 / np.sqrt(len(v))
d_mean_ci[k] = (mean, ci)
return d_mean_ci
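# Worked example with made-up values: summarize_values_stats({1: [0.5, 0.7]})
# returns {1: (0.6, 0.1 * 1.96 / sqrt(2))}, roughly {1: (0.6, 0.139)}, since
# np.std([0.5, 0.7]) is 0.1 and 1.96 is the normal quantile used for a 95%
# confidence interval.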
def read_data(input_path, do_finegrainedness_analysis, do_imbalance_analysis):
"""Reads the data from the evaluation files.
Args:
input_path: The path to the event file to read from.
do_finegrainedness_analysis: Whether to perform analysis of fine- vs coarse-
grained tasks. This affects the tags that are necessary to find in the
event files.
do_imbalance_analysis: Whether to analyze performance for episodes that are
characterized by different degrees of imbalance.
Returns:
ways: A list containing the 'way' of each episode.
shots: A list containing a np.array per episode. The shape of an episode's
array is the [way].
class_ids: A list containing a np.array per episode which contains two class
id's representing the two classes chosen for that binary classification.
test_logits: A list containing a np.array per episode. The shape of an
episode's array is [size of test set, way].
test_targets: A list containing a np.array per episode. The shape of an
episode's array is [size of test set]. This contains integers in the range
from 0 to the way of the episode.
Raises:
ValueError: Finegrainedness analysis is requested but no summaries of
class_ids are found for the provided split, or imbalance analysis is
requested but no summaries of class_props are found.
"""
split = (
FLAGS.eval_finegrainedness_split
if FLAGS.eval_finegrainedness else 'test')
logging.info('Reading event file %s for summaries of split %s.', input_path,
split)
(ways, shots, class_props, class_ids, test_logits,
test_targets) = [], [], [], [], [], []
tags = set()
for e in tf.train.summary_iterator(input_path):
for v in e.summary.value:
tags.add(v.tag)
if v.tag == '{}_way'.format(split):
ways.append(v.simple_value)
elif v.tag == '{}_shots'.format(split):
shots.append(tf.make_ndarray(v.tensor))
elif v.tag == '{}_class_props'.format(split):
class_props.append(tf.make_ndarray(v.tensor))
elif v.tag == '{}_class_ids'.format(split):
class_ids.append(tf.make_ndarray(v.tensor))
elif v.tag == '{}_test_logits'.format(split):
test_logits.append(tf.make_ndarray(v.tensor))
elif v.tag == '{}_test_targets'.format(split):
test_targets.append(tf.make_ndarray(v.tensor))
if do_finegrainedness_analysis and not class_ids:
raise ValueError(
'No summaries found with tag: {}_class_ids. The tags that exist in the '
'event file are: {}.'.format(split, list(tags)))
if do_imbalance_analysis and not class_props:
raise ValueError(
'No summaries found with tag: {}_class_props. The tags that exist in '
'the event file are: {}.'.format(split, list(tags)))
return ways, shots, class_props, class_ids, test_logits, test_targets
def write_pkl(output_data, output_path):
"""Save output_data to the pickle at output_path."""
with tf.io.gfile.GFile(output_path, 'wb') as f:
pkl.dump(output_data, f, protocol=pkl.HIGHEST_PROTOCOL)
logging.info('Dumped data with keys: %s to location %s',
list(output_data.keys()), output_path)
def read_pkl(output_path):
"""Returns the contents of a pickle file or False if it doesn't exist."""
if tf.io.gfile.exists(output_path):
with tf.io.gfile.GFile(output_path, 'rb') as f:
data = pkl.load(f)
logging.info('Read data with keys: %s', list(data.keys()))
return data
else:
return False
def get_event_files(root_dir):
"""Returns all event files from the subdirectories of root_dir.
Args:
root_dir: A str. The root directory of evaluation experiments.
Assumes the following directory organization: root_dir contains a sub-
directory for every dataset, and each of those contains a directory named
'summaries' where an event file can be found.
"""
paths_to_events = []
summaries_dir = os.path.join(root_dir, 'summaries')
assert tf.io.gfile.isdir(summaries_dir), ('Could not find summaries in %s.' %
root_dir)
if int(FLAGS.restrict_to_subexperiment) > 0:
child_dirs = [os.path.join(summaries_dir, FLAGS.restrict_to_subexperiment)]
else:
child_dirs = [
os.path.join(summaries_dir, f)
for f in tf.io.gfile.listdir(summaries_dir)
]
# Filter out non-directory files, if any.
child_dirs = [child for child in child_dirs if tf.io.gfile.isdir(child)]
logging.info('Looking for events in dirs: %s', child_dirs)
for child_dir in child_dirs:
for file_name in tf.io.gfile.listdir(child_dir):
if 'event' in file_name:
paths_to_events.append(os.path.join(child_dir, file_name))
logging.info('Found events: %s', paths_to_events)
return paths_to_events
def get_output_path(path_to_event_file):
"""Returns the path where the pickle of output data will be stored.
Args:
path_to_event_file: The path where the event file lives. Used so that the
output pickle is stored in that same directory.
"""
# Get the directory where the event file was found.
event_dir, _ = os.path.split(path_to_event_file)
out_pickle_path = os.path.join(event_dir, 'aggregated_summary_dicts.pklz')
return out_pickle_path
def combine_dicts(dict_list):
"""Combines the dictionaries in dict_list.
Args:
dict_list: A list of dicts. Each dict maps integers to lists.
Returns:
combined: A dict that has for every key the 'combined' values of all dicts
in dict list that have that key. Combining the values for a key amounts to
concatenating the corresponding lists.
"""
combined = collections.defaultdict(list)
for d in dict_list:
for k, v in d.items():
combined[k].extend(v)
return combined
def analyze_events(paths_to_event_files, experiment_root_dir,
do_finegrainedness_analysis, do_imbalance_analysis,
force_recompute):
"""Analyzes each event file and stores the .pklz in the corresponding dir."""
# Aggregate stats across all event files and write (the summarized version of
# those) to the root directory.
shot_to_precision_all = []
way_to_accuracy_all = []
height_to_accuracy_all = []
imbalance_to_accuracy_all = []
for path_to_event in paths_to_event_files:
output_pickle = get_output_path(path_to_event)
# First check if the required data is already computed and written.
maybe_data = False if force_recompute else read_pkl(output_pickle)
if maybe_data:
logging.info('Output %s already exists. Skipping it.', output_pickle)
shot_to_precision = maybe_data['shot_to_precision']
way_to_accuracy = maybe_data['way_to_accuracy']
height_to_accuracy = maybe_data['height_to_accuracy']
imbalance_to_accuracy = maybe_data['imbalance_to_accuracy']
else:
# Read the data from the event files.
(ways, shots, class_props, class_ids, test_logits,
test_targets) = read_data(path_to_event, do_finegrainedness_analysis,
do_imbalance_analysis)
# A dict mapping each observed 'shot' to a list of class precisions
# obtained by classes that had that shot (regardless of shots of other
# classes in the same episode).
shot_to_precision = get_shot_to_precision(shots, test_logits,
test_targets)
# A dict mapping each observed 'way' of an episode to a list of accuracies
# of the episodes with that way.
way_to_accuracy = get_way_to_accuracy(ways, test_logits, test_targets)
# A dict mapping the height of the lowest common ancestor of each pair of
# leaves defining the binary classification task to the task's accuracy.
height_to_accuracy = {}
if do_finegrainedness_analysis:
height_to_accuracy = get_height_to_accuracy(class_ids, test_logits,
test_targets)
# A dict mapping the degree of imabalance of tasks to their accuracy.
imbalance_to_accuracy = {}
if do_imbalance_analysis:
imbalance_to_accuracy = get_imbalance_to_accuracy(
class_props, test_logits, test_targets)
# Keep only the mean and confidence intervals instead of the list of all
# precisions or accuracies, resp., as the values of these dicts.
shot_to_precision_summarized = summarize_values_stats(shot_to_precision)
way_to_accuracy_summarized = summarize_values_stats(way_to_accuracy)
height_to_accuracy_summarized = summarize_values_stats(height_to_accuracy)
imbalance_to_accuracy_summarized = summarize_values_stats(
imbalance_to_accuracy)
# Save the two dicts to a pickle at the designated location.
output_data = {
'shot_to_precision': shot_to_precision,
'way_to_accuracy': way_to_accuracy,
'height_to_accuracy': height_to_accuracy,
'imbalance_to_accuracy': imbalance_to_accuracy,
'shot_to_precision_summarized': shot_to_precision_summarized,
'way_to_accuracy_summarized': way_to_accuracy_summarized,
'height_to_accuracy_summarized': height_to_accuracy_summarized,
'imbalance_to_accuracy_summarized': imbalance_to_accuracy_summarized,
}
write_pkl(output_data, output_pickle)
shot_to_precision_all.append(shot_to_precision)
way_to_accuracy_all.append(way_to_accuracy)
height_to_accuracy_all.append(height_to_accuracy)
imbalance_to_accuracy_all.append(imbalance_to_accuracy)
# Now aggregate the stats across datasets.
shot_to_precision_all = combine_dicts(shot_to_precision_all)
way_to_accuracy_all = combine_dicts(way_to_accuracy_all)
height_to_accuracy_all = combine_dicts(height_to_accuracy_all)
imbalance_to_accuracy_all = combine_dicts(imbalance_to_accuracy_all)
# Summarize them.
shot_to_precision_all_summarized = summarize_values_stats(
shot_to_precision_all)
way_to_accuracy_all_summarized = summarize_values_stats(way_to_accuracy_all)
height_to_accuracy_all_summarized = summarize_values_stats(
height_to_accuracy_all)
imbalance_to_accuracy_all_summarized = summarize_values_stats(
imbalance_to_accuracy_all)
# Save the dicts to a pickle at the designated location.
output_data = {
'shot_to_precision': shot_to_precision_all,
'way_to_accuracy': way_to_accuracy_all,
'height_to_accuracy': height_to_accuracy_all,
'shot_to_precision_summarized': shot_to_precision_all_summarized,
'way_to_accuracy_summarized': way_to_accuracy_all_summarized,
'height_to_accuracy_summarized': height_to_accuracy_all_summarized,
'imbalance_to_accuracy_summarized': imbalance_to_accuracy_all_summarized
}
pickle_name_base = 'aggregated_summary_dicts'
if int(FLAGS.restrict_to_subexperiment) > 0:
pickle_name_base += '_eval_{}'.format(FLAGS.restrict_to_subexperiment)
output_pickle = os.path.join(experiment_root_dir, pickle_name_base + '.pklz')
write_pkl(output_data, output_pickle)
def main(argv):
del argv
paths_to_event_files = get_event_files(FLAGS.root_dir)
if not paths_to_event_files:
logging.info('No event files found.')
return
analyze_events(paths_to_event_files, FLAGS.root_dir,
FLAGS.eval_finegrainedness, FLAGS.eval_imbalance,
FLAGS.force_recompute)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
tf.app.run(main)
| apache-2.0 | 449,227,501,078,236,160 | 41.201563 | 80 | 0.703876 | false |
BioinfUD/K-mersCL | src/multiple_execution.py | 1 | 9155 | import argparse
import os
from shutil import copyfile
import subprocess
import sys
from utils.analyze_taken_metrics import merge_metrics
import config
from config import MSPK_PARTITION_PATH, THREADS, TOTAL_CORES, KMC_PATH, CLEAR_MEM_SCRIPT_PATH
def parse_arguments():
parser = argparse.ArgumentParser(
description="Executes multiple times the get super kmer script to do a performance assesment")
parser.add_argument('--kmers', dest="kmers", default="31",
help="Kmer size to perform performance assesment (Comma separated). Default value: 31")
parser.add_argument('--mmers', dest="mmers", default="4",
help="Mmer size to perform performance assesment (Comma separated)")
parser.add_argument('--input_files', dest="input_files", help="List of paths to evaluate files (Comma separated)")
parser.add_argument('--read_sizes', dest="read_sizes",
help="Read size of each file specified on --input_files option")
parser.add_argument('--output_path', dest="output_path", default="output_superkmers",
help="Folder where the stats and output will be stored")
parser.add_argument('--methods', dest="methods", default="kmerscl", help="Which method will be used to process reads (mspk or kmerscl), (comma separated for multiple)")
parser.add_argument('--n_reads', dest="n_reads", default=None, help="Number of reads in each file (Comma separated values). If not specified this will be estimated")
args = parser.parse_args()
if any(x is None for x in [args.kmers, args.mmers, args.input_files, args.read_sizes, args.output_path, args.methods]):
parser.print_help()
sys.exit(0)
kmers = args.kmers.split(",")
mmers = args.mmers.split(",")
input_files = args.input_files.split(",")
read_sizes = args.read_sizes.split(",")
output_path = args.output_path
methods = ["kmerscl"] if not args.methods else args.methods.split(",")
n_reads = None if not args.n_reads else args.n_reads.split(",")
return kmers, mmers, input_files, read_sizes, output_path, methods, n_reads
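# Example invocation (paths and values are illustrative only):
#
#   python multiple_execution.py --kmers 21,31 --mmers 4 \
#       --input_files /data/reads_a.fa,/data/reads_b.fa \
#       --read_sizes 100,150 --output_path results/ \
#       --methods kmerscl,kmc --n_reads 1000000,2000000
#
# The comma-separated lists are matched positionally: the i-th read size and
# read count describe the i-th input file.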
def execute_metrics_collection(full_output_path):
# This should be async
path = os.path.join(full_output_path, "metrics")
if not os.path.exists(path):
os.system('mkdir -p {}'.format(path))
cpu_command = "sar -P ALL 1 99999 > {}/sar_cpu_file.log".format(path)
memio_command = "sar -b -r 1 99999 > {}/sar_mem_io_file.log".format(path)
base_command = "nvidia-smi --query-gpu=utilization.gpu,utilization.memory,memory.total,memory.free,memory.used --format=csv -l 1"
if hasattr(config, 'SPECIFIC_GPU'):
base_command += " -i {}".format(config.SPECIFIC_GPU)
nvidia_command = "{} | ts %s, >> {}/nvidia_gpu.log ".format(base_command, path)
process_cpu = subprocess.Popen("LC_TIME='C' exec " + cpu_command, shell=True)
process_memio = subprocess.Popen("LC_TIME='C' exec " +memio_command, shell=True)
process_nvidia = subprocess.Popen("LC_TIME='C' exec " +nvidia_command, shell=True)
return process_cpu, process_memio, process_nvidia
def execute_kmercl(params):
# Sync
params['output_path'] = "{output_path}/output_files".format(**params)
command = "python2 -u getSuperK2_M.py --kmer {kmer} --mmer {mmer} --input_file {input_file} --read_size {read_size} --output_path {output_path}".format(**params)
print "Executing {}".format(command)
if params['n_reads']:
command += " --n_reads {}".format(params['n_reads'])
command += " | ts %s, > {log_output_path}".format(**params)
sys.stdout.write("Executing '{}' \n".format(command))
subprocess.call(command, shell=True)
def execute_kmercl_signature(params):
# Sync
params['output_path'] = "{output_path}/output_files".format(**params)
command = "python2 -u getSuperK2_M_signature.py --kmer {kmer} --mmer {mmer} --input_file {input_file} --read_size {read_size} --output_path {output_path}".format(**params)
print "Executing {}".format(command)
if params['n_reads']:
command += " --n_reads {}".format(params['n_reads'])
command += " | ts %s, | tee {log_output_path}".format(**params)
sys.stdout.write("Executing '{}' \n".format(command))
subprocess.call(command, shell=True)
def execute_kmc(params):
params['working_dir'] = "{output_path}tmp".format(**params)
params['output_path'] = "{output_path}/output_files.res".format(**params)
params['n_cores'] = THREADS
command = KMC_PATH + " -k{kmer} -p{mmer} -t{n_cores} -fa {input_file} {output_path} {working_dir} | ts %s, | tee {log_output_path}".format(**params)
sys.stdout.write("Executing '{}' \n".format(command))
subprocess.call(command, shell=True)
# We need to copy mspk since it is not possible to configure the output path, and handling cd and related steps would be harder
def copyMSPK(params):
print "Copying mspk to: {}".format(params['output_path'])
copy_path = params['output_path']
copyfile(os.path.join(MSPK_PARTITION_PATH, "Partition.class"), copy_path + "Partition.class")
copyfile(os.path.join(MSPK_PARTITION_PATH, "Partition$MyThreadStep1.class"), copy_path +"Partition$MyThreadStep1.class")
copyfile(os.path.join(MSPK_PARTITION_PATH, "guava-19.0.jar"), copy_path + "guava-19.0.jar")
def execute_mspk(params):
params['output_path'] = os.path.join(params['output_path'])
params['n_cores'] = THREADS
command = "cd {output_path} && java -cp guava-19.0.jar: Partition -in {input_file} -k {kmer} -L {read_size} -p {mmer} -t {n_cores} | ts %s, > {log_output_path}".format(**params)
sys.stdout.write("Executing '{}' \n".format(command))
subprocess.call(command, shell=True)
pass
def execute_sleep(seconds):
sleep_time = seconds
command = "sleep {}".format(sleep_time)
subprocess.call("exec "+command, shell=True)
def execute_metrics_summary(full_output_path):
path = os.path.join(full_output_path, "metrics")
sys.stdout.write("Mergin metrics in {}".format(path))
merge_metrics(path, TOTAL_CORES, "2017-12-09")
def kill_processes(pids):
sys.stdout.write("Killing metrics collection processes {}\n".format(pids))
subprocess.call("killall -9 sar", shell=True)
subprocess.call("killall -9 sar", shell=True)
subprocess.call("killall -9 nvidia-smi", shell=True)
subprocess.call("sudo sh {}".format(CLEAR_MEM_SCRIPT_PATH), shell=True)
def delete_output_files(output_path):
os.system('rm -rf {}/output*'.format(output_path))
os.system('rm -rf {}/Node*'.format(output_path))
def execute_assesment(kmer, mmer, input_file, read_size, output_path, method, n_reads):
params = {'mmer': mmer, 'input_file_name': input_file.split("/")[-1], 'kmer': kmer, 'output_path': output_path,
'read_size': read_size, 'input_file': input_file, "method": method, "n_reads": n_reads}
full_output_path = os.path.join(params['output_path'], "{method}-k{kmer}-m{mmer}-r{read_size}-{input_file_name}/".format(**params))
print full_output_path
os.system('mkdir -p {}'.format(full_output_path))
os.system('mkdir -p {}/tmp'.format(full_output_path))
# Rewrite for specific output
params['output_path'] = full_output_path
params['log_output_path'] = os.path.join(full_output_path, "metrics", "tool_log.csv")
# Copy this before metrics collection starts
if method == "mspk":
copyMSPK(params)
process_cpu, process_memio, process_nvidia = execute_metrics_collection(full_output_path)
sys.stdout.write(" ***************************************** \n"\
"Execution performance assesment \n"\
"Output path {} \n"\
"***********************************\n".format(full_output_path))
sys.stdout.write("Letting metrics collection process to start \n")
execute_sleep(3)
if method == "kmerscl":
execute_kmercl(params)
if method == "kmerscl_signature":
execute_kmercl_signature(params)
if method == "kmc":
execute_kmc(params)
if method == "mspk":
execute_mspk(params)
if method == "sleep":
execute_sleep(params)
sys.stdout.write("Letting metrics collection process to finish \n")
execute_sleep(3)
kill_processes([process_cpu.pid, process_memio.pid, process_nvidia.pid])
execute_metrics_summary(full_output_path)
# To keep the file system from filling up
delete_output_files(params['output_path'])
def main():
kmers, mmers, input_files, read_sizes, output_path, methods, n_reads = parse_arguments()
for kmer in kmers:
for mmer in mmers:
for method in methods:
for idx, input_file in enumerate(input_files):
n_read = None if not n_reads else n_reads[idx]
try:
execute_assesment(kmer, mmer, input_file, read_sizes[idx], output_path, method, n_read)
except Exception as e:
sys.stdout.write("Exception {} generated with parameters {} \n".format(str(e), [kmer, mmer, input_file, read_sizes[idx], output_path, method]))
if __name__ == '__main__':
main()
| mit | 5,334,840,391,572,585,000 | 50.723164 | 181 | 0.6497 | false |
redox-alpha/omorfi | src/python/omorfi/twolc_formatter.py | 1 | 6872 | #!/usr/bin/env python3
from sys import exit, stderr
from .settings import (common_multichars, fin_consonants, fin_lowercase, fin_symbols, fin_uppercase, fin_vowels,
newword_boundary, optional_hyphen, word_boundary)
def format_copyright_twolc():
return """
! This automatically generated twolc data is originated from
! omorfi database.
! Copyright (c) 2014 Omorfi contributors
! This program is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation, version 3 of the License
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
! You should have received a copy of the GNU General Public License
! along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
def twolc_escape(s):
'''Escape symbols that have special meaning in twolc.'''
s = s.replace("%", "__PERCENT__")
for c in ' @<>0!:";_^(){}-[]/?+|&*=$,':
s = s.replace(c, "%" + c)
s = s.replace("%_%_PERCENT%_%_", "%%")
return s
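# Two concrete examples that follow from the replacements above:
# twolc_escape("a:b") returns "a%:b", and twolc_escape("a%") returns "a%%"
# (the percent sign is first shielded as __PERCENT__, its underscores get
# escaped, and the resulting %_%_PERCENT%_%_ collapses back to %%).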
def format_alphabet_twolc(format, ruleset):
twolcstring = 'Alphabet\n'
if ruleset.startswith('recase'):
twolcstring += '! Set of Finnish alphabets generated from python:\n'
for c in fin_lowercase:
twolcstring += c + '! allow lowercase as is\n'
twolcstring += c + ':' + c.upper() + '! allow uppercasing\n'
for c in fin_uppercase:
twolcstring += c + '! allow uppercase as is\n'
twolcstring += c + ':' + c.lower() + '! allow lowercasing\n'
for mcs in common_multichars:
twolcstring += twolc_escape(mcs) + '\n'
elif ruleset.startswith('uppercase'):
twolcstring += '! Set of Finnish alphabets generated from python:\n'
for c in fin_lowercase:
twolcstring += c + '! allow lowercase as is\n'
twolcstring += c + ':' + c.upper() + '! allow uppercasing\n'
for c in fin_uppercase:
twolcstring += c + '! allow uppercase as is\n'
for mcs in common_multichars:
twolcstring += twolc_escape(mcs) + '\n'
elif ruleset == 'hyphenate':
twolcstring += ' '.join(fin_lowercase) + '! lower\n'
twolcstring += ' '.join(fin_uppercase) + '! upper\n'
for mcs in common_multichars:
twolcstring += twolc_escape(mcs) + ':0 ! deleting all specials\n'
if mcs == optional_hyphen or mcs == word_boundary or mcs == newword_boundary:
twolcstring += twolc_escape(mcs) + \
':%-1 ! always hyphen or nothing\n'
twolcstring += '0:%-2 ! weaker hyphens\n'
elif ruleset == 'hyphens':
twolcstring += twolc_escape(optional_hyphen) + \
':0 ! boundary can be zero\n'
twolcstring += twolc_escape(optional_hyphen) + \
':%- ! or (ASCII) hyphen\n'
twolcstring += '%-\n'
for mcs in common_multichars:
if mcs != optional_hyphen:
twolcstring += twolc_escape(mcs) + '\n'
elif ruleset == 'apertium':
for mcs in common_multichars:
twolcstring += twolc_escape(mcs) + ':0 ! deleting all specials\n'
else:
print("Unknown ruleset", ruleset, file=stderr)
exit(1)
twolcstring += ';\n'
return twolcstring
def format_sets_twolc(format, ruleset):
twolcstring = 'Sets\n'
if ruleset.startswith('uppercase') or ruleset.startswith('recase'):
twolcstring += 'Lower = ' + ' '.join(fin_lowercase) + ' ;' + \
'! Lowercase alphabets\n'
twolcstring += 'Upper = ' + ' '.join(fin_uppercase) + ' ;' + \
'! Uppercase alphabets\n'
elif ruleset == 'hyphens':
twolcstring += 'Vowels = ' + ' '.join(fin_vowels) + ' ;' + \
'! Vowels\n'
twolcstring += 'UpperOrSyms = ' + ' '.join(fin_uppercase) + \
' ' + ' '.join([twolc_escape(s) for s in fin_symbols]) +\
'; ' + '! Symbols for likely hyphenated words\n'
elif ruleset == 'hyphenate':
twolcstring += 'Vowels = ' + ' '.join(fin_vowels) + ' ;' + \
'! Vowels\n'
twolcstring += 'Consonants = ' + ' '.join(fin_consonants) + ' ;' + \
'! Consonants\n'
elif ruleset == 'apertium':
pass
else:
print("missing ruleset", ruleset)
exit(1)
twolcstring += 'DUMMYSETCANBEUSEDTOTESTBUGS = a b c ;\n'
return twolcstring
def format_definitions_twolc(format, ruleset):
twolcstring = 'Definitions\n'
if ruleset == 'hyphenate':
twolcstring += 'WordBoundary = [ %- | :%- | ' \
+ word_boundary + ':0 | #: | .#. ] ;\n'
twolcstring += 'DUMMYDEFINITIONCANBEUSEDTOTESTBUGS = a | b | c ;\n'
return twolcstring
def format_rules_twolc(format, ruleset):
twolcstring = "Rules\n"
if ruleset == 'stub-phon':
twolcstring += '"Dummy rule"\na <= _ ;\n'
elif ruleset == 'recase-any':
twolcstring += '"Uppercase anywhere dummy rule"\n'
twolcstring += twolc_escape(optional_hyphen) + " <= _ ;\n"
elif ruleset == 'uppercase-first':
twolcstring += '"Require uppercase in beginning"\n'
twolcstring += 'LC:UC => .#. _ ;\n'
twolcstring += '\twhere LC in Lower UC in Upper matched ;\n'
elif ruleset == 'hyphens':
twolcstring += '"Disallow no hyphen between equal vowels"\n'
twolcstring += twolc_escape(optional_hyphen) + ':0 /<= ' + \
"VOWEL :0* _ :0* VOWEL ; where VOWEL in Vowels matched ;\n"
elif ruleset == 'hyphenate':
twolcstring += '"Hyphenate Before consonant clusters"\n'
twolcstring += "0:%-2 <=> Vowels (Consonants) (Consonants) _ Consonants Vowels ;\n"
twolcstring += '"Hyphenate between non-diphtongs"\n'
twolcstring += "0:%-3 <=> Vx _ Vy ;\n"
twolcstring += "\twhere Vx in (a a a a a e e e e i i i i o o o o o u u u u u y y y y y ä ä ä ä ä ö ö ö ö)\n"
twolcstring += "\t\tVy in (e o y ä ö a o ä ö a o ä ö a e y ä ö a e y ä ö e ä a o u e ö a o u ä a o u) matched ;\n"
twolcstring += '"Hyphenate diphtongs in latter syllables"\n'
twolcstring += "0:%-4 <=> WordBoundary (Consonants) (Consonants) [Vowels (Vowels) Consonants (Consonants)]+ Vx _ Vy ;\n"
twolcstring += "\twhere Vx in (a e o u y ä ö a e i o ä ö u y i e i)\n"
twolcstring += "\t\tVy in (i i i i i i i u u u u y y o ö y y e) matched ;\n"
elif ruleset == 'apertium':
twolcstring += '"Remove stuffs"\n'
twolcstring += "a <= _ ; ! remove everywhere\n"
else:
print("Unknown ruleset", ruleset, file=stderr)
exit(1)
return twolcstring
| gpl-3.0 | -770,607,448,415,127,000 | 43.16129 | 128 | 0.582177 | false |
efforia/eos-dashboard | cloud/nexgyn/adminx.py | 1 | 1067 | import xadmin
from models import *
class MenstruacaoAdmin(object):
list_display = Menstruacao._meta.get_all_field_names()
class AntecedenteAdmin(object):
list_display = Antecedente._meta.get_all_field_names()
class OutroAdmin(object):
list_display = Outro._meta.get_all_field_names()
class AtividadeAdmin(object):
list_display = Atividade._meta.get_all_field_names()
class TabelaAnomaliaAdmin(object):
list_display = TabelaAnomalia._meta.get_all_field_names()
class TabelaMamariaAdmin(object):
list_display = TabelaMamaria._meta.get_all_field_names()
class TabelaGinecologicaAdmin(object):
list_display = TabelaGinecologica._meta.get_all_field_names()
xadmin.site.register(Menstruacao, MenstruacaoAdmin)
xadmin.site.register(Antecedente, AntecedenteAdmin)
xadmin.site.register(Outro, OutroAdmin)
xadmin.site.register(Atividade, AtividadeAdmin)
xadmin.site.register(TabelaAnomalia, TabelaAnomaliaAdmin)
xadmin.site.register(TabelaMamaria, TabelaMamariaAdmin)
xadmin.site.register(TabelaGinecologica, TabelaGinecologicaAdmin)
| lgpl-3.0 | -8,351,986,618,758,862,000 | 27.078947 | 65 | 0.789128 | false |
HybridF5/jacket | jacket/tests/storage/unit/volume/drivers/emc/scaleio/test_create_volume.py | 1 | 4940 | # Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jacket import context
from jacket.storage import exception
from jacket.tests.storage.unit import fake_volume
from jacket.tests.storage.unit.volume.drivers.emc import scaleio
class TestCreateVolume(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.create_volume()``"""
def setUp(self):
"""Setup a test case environment.
Creates a fake volume object and sets up the required API responses.
"""
super(TestCreateVolume, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.volume = fake_volume.fake_volume_obj(ctx)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.volume.name: '"{}"'.format(self.volume.id),
'types/Volume/instances': {'id': self.volume.id},
'types/Domain/instances/getByName::' +
self.PROT_DOMAIN_NAME:
'"{}"'.format(self.PROT_DOMAIN_ID),
'types/Pool/instances/getByName::{},{}'.format(
self.PROT_DOMAIN_ID,
self.STORAGE_POOL_NAME
): '"{}"'.format(self.STORAGE_POOL_ID),
},
self.RESPONSE_MODE.Invalid: {
'types/Domain/instances/getByName::' +
self.PROT_DOMAIN_NAME: None,
'types/Pool/instances/getByName::{},{}'.format(
self.PROT_DOMAIN_ID,
self.STORAGE_POOL_NAME
): None,
},
self.RESPONSE_MODE.BadStatus: {
'types/Volume/instances': self.BAD_STATUS_RESPONSE,
'types/Domain/instances/getByName::' +
self.PROT_DOMAIN_NAME: self.BAD_STATUS_RESPONSE,
'types/Pool/instances/getByName::{},{}'.format(
self.PROT_DOMAIN_ID,
self.STORAGE_POOL_NAME
): self.BAD_STATUS_RESPONSE,
},
}
def test_no_domain(self):
"""No protection domain name or ID provided."""
self.driver.protection_domain_name = None
self.driver.protection_domain_id = None
self.assertRaises(exception.VolumeBackendAPIException,
self.test_create_volume)
def test_no_domain_id(self):
"""Only protection domain name provided."""
self.driver.protection_domain_id = None
self.driver.protection_domain_name = self.PROT_DOMAIN_NAME
self.driver.storage_pool_name = None
self.driver.storage_pool_id = self.STORAGE_POOL_ID
self.test_create_volume()
def test_no_domain_id_invalid_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_domain_id)
def test_no_domain_id_badstatus_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_domain_id)
def test_no_storage_id(self):
"""Only protection domain name provided."""
self.driver.storage_pool_id = None
self.driver.storage_pool_name = self.STORAGE_POOL_NAME
self.driver.protection_domain_id = self.PROT_DOMAIN_ID
self.driver.protection_domain_name = None
self.test_create_volume()
def test_no_storage_id_invalid_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_storage_id)
def test_no_storage_id_badstatus_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_storage_id)
def test_create_volume(self):
"""Valid create volume parameters"""
self.driver.create_volume(self.volume)
def test_create_volume_badstatus_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_create_volume)
| apache-2.0 | 3,500,929,685,928,871,000 | 41.956522 | 78 | 0.619636 | false |
kichkasch/pygls | src/pygls/GLSCommands.py | 1 | 2417 | """
Python library for GPS Location Sharing - collection of constants for commands and replies for the GLS server.
All constants starting with "co" are commands sent from the client to the server. All constants starting with
"re" are replies, which are sent back from the server to the client in reply to a request.
http://www.assembla.com/wiki/show/dZdDzazrmr3k7AabIlDkbG
@author: Michael Pilgermann
@contact: mailto:[email protected]
@contact: http://www.kichkasch.de
@license: GPL (General Public License)
@var CO_VERSION: Command string of protocol - request version.
@type CO_VERSION: C{String}
@var CO_LOGIN: Command string of protocol - login attempt.
@type CO_LOGIN: C{String}
@var CO_DEVICE: Command string of protocol - submit device information.
@type CO_DEVICE: C{String}
@var CO_POSITION: Command string of protocol - submit or request position information.
@type CO_POSITION: C{String}
@var CO_WAYPOINT: Command string of protocol - submit or request waypoint information.
@type CO_WAYPOINT: C{String}
@var CO_GROUP: Command string of protocol - enter group or request group information.
@type CO_GROUP: C{String}
@var CO_QUIT: Command string of protocol - shut down connection.
@type CO_QUIT: C{String}
@var RE_OK: Command reply from server of protocol - OK, successfully processed last request.
@type RE_OK: C{String}
@var RE_CHANGE: Command reply from server of protocol - processing not successful because of business rules.
@type RE_CHANGE: C{String}
@var RE_ERROR: Command reply from server of protocol - error, due to validation rules.
@type RE_ERROR: C{String}
@var RE_GROUP: Command reply from server of protocol - sending of a group.
@type RE_GROUP: C{String}
@var RE_POSITION: Command reply from server of protocol - sending position of a client.
@type RE_POSITION: C{String}
@var RE_WAYPOINT: Command reply from server of protocol - sending a waypoint of a client.
@type RE_WAYPOINT: C{String}
@var RE_FINISHED: Command reply from server of protocol - a sequence was finished.
@type RE_FINISHED: C{String}
@var RE_QUIT: Command reply from server of protocol - server is closing connection.
@type RE_QUIT: C{String}
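Example (illustrative sketch, not part of the original module; it assumes each
reply line received from the server starts with one of the single-character
reply codes below, and the handler functions are placeholders)::
    code = reply_line[0]
    if code == RE_ERROR:
        handle_validation_error(reply_line)
    elif code == RE_POSITION:
        update_peer_position(reply_line)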
"""
# Commands
CO_VERSION = "V"
CO_LOGIN = "N"
CO_DEVICE = "D"
CO_POSITION = "P"
CO_WAYPOINT = "W"
CO_GROUP = "G"
CO_QUIT = "Q"
# Replies
RE_OK = "K"
RE_CHANGE = "C"
RE_ERROR = "E"
RE_GROUP = "G"
RE_POSITION = "P"
RE_WAYPOINT = "W"
RE_FINISHED = "F"
RE_QUIT = "Q"
| gpl-3.0 | -3,231,831,272,823,069,000 | 36.184615 | 110 | 0.745966 | false |
richteer/xmpp-bot | modules/quote.py | 1 | 1424 | import random
import os
quotes = []
def init():
global quotes
quotes = []
	with open("quotes.txt", "r") as f:
quotes = f.read().splitlines()
def quote_handler(string):
if len(quotes) == 0:
return "Error: quotes.txt wasn't initialized properly?"
if string == None:
return quotes[random.randint(0, len(quotes)-1)]
temp = [q for q in quotes if string.lower() in q.lower()]
if len(temp) == 0:
return "No quotes match your string"
return temp[random.randint(0, len(temp)-1)]
def quoteadd_handler(string):
if string == None:
return "No quote supplied!"
if len(quotes) >= 50:
return "Too many quotes in the buffer, sorry :("
quotes.append(string)
return "Added! :)"
def writequotes_handler(string):
try:
with open("quotes.txt","w") as f:
f.write("\n".join(quotes))
except:
return "Could not write to 'quotes.txt', sorry..."
return "Updated 'quotes.txt'!"
commands = {
"!quote":quote_handler,
"!quoteadd":quoteadd_handler
}
admincommands = {
"!writequotes":writequotes_handler,
"!reloadquotes":lambda x: init()
}
help_text = {
"!quote":"\nUsage: !quote [substring]\nReplies with a random quote from 'quotes.txt'.\nIf an argument is supplied, return only quotes that contain that substring",
"!quoteadd":"\nUsage: !quoteadd <quote>\nTemporarily add the quote into the pool of quotes.\nNOTE: Quotes added this way will NOT persist when the bot is shut down, or the module is reloaded."
}
| bsd-3-clause | -1,621,905,694,427,489,300 | 26.384615 | 193 | 0.691713 | false |
getsentry/zeus | zeus/api/resources/base_change_request.py | 1 | 1467 | from flask import request, Response
from sqlalchemy.orm import contains_eager
from zeus import auth
from zeus.constants import PERMISSION_MAP
from zeus.models import ChangeRequest, Repository, RepositoryProvider
from .base import Resource
class BaseChangeRequestResource(Resource):
def dispatch_request(
self,
provider: str,
owner_name: str,
repo_name: str,
cr_number: int,
*args,
**kwargs
) -> Response:
queryset = ChangeRequest.query.join(
Repository, Repository.id == ChangeRequest.repository_id
).filter(
Repository.provider == RepositoryProvider(provider),
Repository.owner_name == owner_name,
Repository.name == repo_name,
ChangeRequest.number == cr_number,
)
if self.select_resource_for_update():
queryset = queryset.with_for_update()
else:
# HACK(dcramer): we dont want to lock the repo row, so for now just deal
# w/ the consequences of this
queryset = queryset.options(contains_eager("repository"))
cr = queryset.first()
if not cr:
return self.not_found()
tenant = auth.get_current_tenant()
if not tenant.has_permission(cr.repository_id, PERMISSION_MAP[request.method]):
return self.error("permission denied", 400)
return Resource.dispatch_request(self, cr, *args, **kwargs)
| apache-2.0 | 8,692,239,293,364,636,000 | 33.116279 | 87 | 0.628494 | false |
tbabej/freeipa | ipatests/test_ipaserver/test_otptoken_import.py | 1 | 5773 | # Authors:
# Nathaniel McCallum <[email protected]>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import pytest
from nss import nss
from ipalib.x509 import initialize_nss_database
from ipaserver.install.ipa_otptoken_import import PSKCDocument, ValidationError
basename = os.path.join(os.path.dirname(__file__), "data")
@pytest.mark.skipif(True, reason="Causes NSS errors. Ticket 5192")
@pytest.mark.tier1
class test_otptoken_import(object):
def teardown(self):
initialize_nss_database()
def test_figure3(self):
doc = PSKCDocument(os.path.join(basename, "pskc-figure3.xml"))
assert doc.keyname is None
assert [(t.id, t.options) for t in doc.getKeyPackages()] == \
[(u'12345678', {
'ipatokenotpkey': u'GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ',
'ipatokenvendor': u'Manufacturer',
'ipatokenserial': u'987654321',
'ipatokenhotpcounter': 0,
'ipatokenotpdigits': 8,
'type': u'hotp',
})]
def test_figure4(self):
doc = PSKCDocument(os.path.join(basename, "pskc-figure4.xml"))
assert doc.keyname is None
try:
[(t.id, t.options) for t in doc.getKeyPackages()]
except ValidationError: # Referenced keys are not supported.
pass
else:
assert False
def test_figure5(self):
doc = PSKCDocument(os.path.join(basename, "pskc-figure5.xml"))
assert doc.keyname is None
try:
[(t.id, t.options) for t in doc.getKeyPackages()]
except ValidationError: # PIN Policy is not supported.
pass
else:
assert False
def test_figure6(self):
nss.nss_init_nodb()
try:
doc = PSKCDocument(os.path.join(basename, "pskc-figure6.xml"))
assert doc.keyname == 'Pre-shared-key'
doc.setKey('12345678901234567890123456789012'.decode('hex'))
assert [(t.id, t.options) for t in doc.getKeyPackages()] == \
[(u'12345678', {
'ipatokenotpkey': u'GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ',
'ipatokenvendor': u'Manufacturer',
'ipatokenserial': u'987654321',
'ipatokenhotpcounter': 0,
'ipatokenotpdigits': 8,
'type': u'hotp'})]
finally:
nss.nss_shutdown()
def test_figure7(self):
nss.nss_init_nodb()
try:
doc = PSKCDocument(os.path.join(basename, "pskc-figure7.xml"))
assert doc.keyname == 'My Password 1'
doc.setKey('qwerty')
assert [(t.id, t.options) for t in doc.getKeyPackages()] == \
[(u'123456', {
'ipatokenotpkey': u'GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ',
'ipatokenvendor': u'TokenVendorAcme',
'ipatokenserial': u'987654321',
'ipatokenotpdigits': 8,
'type': u'hotp'})]
finally:
nss.nss_shutdown()
def test_figure8(self):
nss.nss_init_nodb()
try:
doc = PSKCDocument(os.path.join(basename, "pskc-figure8.xml"))
except NotImplementedError: # X.509 is not supported.
pass
else:
assert False
finally:
nss.nss_shutdown()
def test_invalid(self):
nss.nss_init_nodb()
try:
doc = PSKCDocument(os.path.join(basename, "pskc-invalid.xml"))
except ValueError: # File is invalid.
pass
else:
assert False
finally:
nss.nss_shutdown()
def test_mini(self):
nss.nss_init_nodb()
try:
doc = PSKCDocument(os.path.join(basename, "pskc-mini.xml"))
for t in doc.getKeyPackages():
t._PSKCKeyPackage__process()
except ValidationError: # Unsupported token type.
pass
else:
assert False
finally:
nss.nss_shutdown()
def test_full(self):
nss.nss_init_nodb()
try:
doc = PSKCDocument(os.path.join(basename, "full.xml"))
assert [(t.id, t.options) for t in doc.getKeyPackages()] == \
[(u'KID1', {
'ipatokenotpkey': u'GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ',
'ipatokennotafter': u'20060531000000Z',
'ipatokennotbefore': u'20060501000000Z',
'ipatokenserial': u'SerialNo-IssueNo',
'ipatokentotpclockoffset': 60000,
'ipatokenotpalgorithm': u'sha1',
'ipatokenvendor': u'iana.dummy',
'description': u'FriendlyName',
'ipatokentotptimestep': 200,
'ipatokenhotpcounter': 0,
'ipatokenmodel': u'Model',
'ipatokenotpdigits': 8,
'type': u'hotp',
})]
finally:
nss.nss_shutdown()
| gpl-3.0 | -3,333,372,417,580,155,400 | 35.537975 | 79 | 0.563312 | false |
Azure/azure-sdk-for-python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_region_operations.py | 1 | 5693 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RegionOperations:
"""RegionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_service(
self,
resource_group_name: str,
service_name: str,
**kwargs
) -> AsyncIterable["_models.RegionListResult"]:
"""Lists all azure regions in which the service exists.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RegionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.apimanagement.models.RegionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
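        Example (illustrative; assumes an authenticated asynchronous
        ApiManagementClient instance named ``client``)::
            async for region in client.region.list_by_service('my-rg', 'my-apim'):
                print(region.name)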
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_service.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RegionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/regions'} # type: ignore
| mit | 1,462,007,640,238,916,400 | 47.65812 | 188 | 0.641841 | false |
simondona/exp-control-bec-tn | libraries/parser.py | 1 | 15808 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2016 Simone Donadello
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
PRG_FILE_VERSION = "0.5.1"
import libraries.program as lib_program
import re, os
class Parser(object):
def __init__(self, system):
self.system = system
self.programs_folder = os.path.join("data", "programs")
def delete_program_file(self, prg_name):
if self.system.action_list.is_program(prg_name):
categories = self.system.action_list.get_dict(prg_name)["categories"]
path, fname = self.get_program_path(prg_name, categories)
save_name = os.path.join(path, fname)
if os.path.isfile(save_name):
print "deleting program '%s'"%save_name
os.remove(save_name)
else:
print "ERROR: filename '%s' not found"%save_name
self.system.init_actions()
def read_program_file(self, action_name):
categories = self.system.action_list.get_dict(action_name)["categories"]
path, fname = self.get_program_path(action_name, categories)
fname_path = os.path.join(path, fname)
if not os.path.isfile(fname_path):
print "ERROR: filename '%s' not found"%fname_path
return None
else:
print "reading '%s'"%fname_path
prg_str = ""
with open(fname_path, "r") as fid:
prg_str = fid.read()
prg_list = []
cmd_str0 = ""
cmd_str1 = ""
lines = prg_str.splitlines()
lines = [l for l in lines if l]
prg_flag = False
cmd_flag = cmd_loop_flag = False
cmd_first_flag = False
cmd_indent = ""
version = None
prg_lines = []
cmd_lines = ([], [])
for line in lines:
if version is None:
m_vers = re.match("^\s*prg_version\s*=\s*['\"](.*)['\"]$", line)
if m_vers is not None:
version = m_vers.group(1).split(".")
current_version = PRG_FILE_VERSION.split(".")
valid_version = False
for n in range(min(len(version), len(current_version))):
valid_version = int(version[n]) == int(current_version[n])
if not valid_version:
print "WARNING: outdated saved program version"
return_match = re.match("^\s*return\s+.*$", line) is not None
prg_start_match = re.match("^\s*def\s+program.*$", line) is not None
cmd_loop_start_match = re.match("^\s*while\s*\(\s*cmd.running\s*\)\s*:\s*$", line) is not None
cmd_init_start_match = re.match("^\s*def\s+commands.*$", line) is not None
if prg_start_match and not prg_flag and not cmd_flag:
prg_flag = True
if return_match and prg_flag and not cmd_flag:
prg_flag = False
if cmd_init_start_match and not prg_flag and not cmd_flag:
cmd_flag = True
cmd_first_flag = True
if cmd_loop_start_match and not prg_flag and cmd_flag:
cmd_loop_flag = True
cmd_first_flag = True
if return_match and not prg_flag and cmd_flag:
cmd_loop_flag = cmd_flag = False
if prg_flag and not prg_start_match:
prg_lines.append(line.strip())
if cmd_first_flag and not cmd_init_start_match \
and not cmd_loop_start_match and not return_match:
cmd_first_flag = False
cmd_indent = re.match("^(\s*)", line).group(1)
if cmd_flag and not cmd_loop_start_match and not cmd_init_start_match:
if cmd_loop_flag:
cmd_lines[1].append(re.sub(cmd_indent, "", line, count=1))
else:
cmd_lines[0].append(re.sub(cmd_indent, "", line, count=1))
for line in prg_lines:
#check if it is a valid line with add instruction
m0 = re.match("^\s*prg.add[(]\s*(.*)\s*[)]\s*$", line)
if m0 is None:
print "ERROR: skipping line '%s'"%line
else:
line = m0.group(1)
#split the line into prg.add function arguments
#(consider the first grade of parenthesis)
par_count = spar_count = 0
part_line = ""
split_line = []
for ch in line:
if ch == "(": par_count += 1
elif ch == ")": par_count -= 1
elif ch == "[": spar_count += 1
elif ch == "]": spar_count -= 1
if ch == "," and par_count == 0 and spar_count == 0:
split_line.append(part_line.strip())
part_line = ""
else:
part_line += ch
split_line.append(part_line.strip())
#the line is now splitted into arguments
line = split_line
#at least time and action must be present
if not len(line) >= 2:
print "ERROR: skipping line '%s'"%line
else:
#parse time and action name
time = line[0]
act_name = line[1].strip("'\"")
#parse function add agruments
#default values
functions = []
variables = []
enable = True
funct_enable = True
if len(line) > 2:
#parse arguments other than time and action name
for lin in line[2:]:
#match functions arguments
m_fun = re.match("\s*functions\s*=\s*(.*)", lin)
if m_fun is None:
#parse if not functions, split key and value
ar = re.split("\s*=\s*", lin)
if len(ar) == 2:
#if named argument
if ar[0] == "enable":
#if enamble arg
if ar[1] == "True":
enable = True
elif ar[1] == "False":
enable = False
else:
print "ERROR: enable value '%s' not valid"%ar[1]
else:
#otherwise variable arg
variables.append(ar)
else:
#positional variable arg
variables.append(ar)
else:
#parse functions into key and lambda function
m0 = re.match("^dict\s*[(]\s*(.*)[)]$", m_fun.group(1))
if m0 is not None:
for m1 in re.split("\s*,\s*", m0.group(1)):
m2 = re.match("^(\S*)\s*=\s*lambda\s+x\s*:\s*(.*)\s*$", m1)
m3 = re.match("^funct_enable\s*=\s*(True|False)\s*$", m1)
if m3 is not None and m2 is None:
if m3.group(1) == "True":
funct_enable = True
elif m3.group(1) == "False":
funct_enable = False
else:
print "ERROR: function enable value '%s' not valid"%m0.group(1)
elif m2 is not None and m3 is None:
functions.append(m2.groups())
else:
print "ERROR: functions of action '%s' not well formatted"%act_name
else:
print "ERROR: functions of action '%s' not well formatted"%act_name
functions = dict(functions)
new_line = self.system.action_list.get_dict(act_name)
if new_line is None:
print "ERROR: skipping action '%s' not found in the database"%act_name
else:
if len(new_line["vars"].keys()) != len(variables):
print "ERROR: variables '%s' not well formatted in action '%s'"%(str(variables), act_name)
else:
for uvar in variables:
if len(variables) > 1 and len(uvar) != 2:
print "ERROR: wrong variables declaration in action '%s'"%act_name
else:
if len(uvar) == 1:
key = new_line["vars"].keys()[0]
value = uvar[0]
else:
key = uvar[0]
value = uvar[1]
if key in new_line["var_formats"]:
fmt_str = new_line["var_formats"][key]
fmt = self.fmt_to_type(fmt_str)
else:
fmt = str
if fmt is int and "." in value:
value = float(value)
new_line["vars"][key] = fmt(value)
for key in functions:
new_line["functions"][key] = functions[key]
new_line["time"] = int(time)
new_line["enable"] = bool(enable)
new_line["funct_enable"] = bool(funct_enable)
prg_list.append(new_line)
for line in cmd_lines[0]:
cmd_str0 += line+"\n"
for line in cmd_lines[1]:
cmd_str1 += line+"\n"
return prg_list, (cmd_str0, cmd_str1)
def write_program_file(self, prg_name, categories, prg_list, prg_comment, cmd_str):
prg_str = ""
if prg_comment is None:
prg_comment = ""
prg_str += "prg_comment = \"%s\"\n"%prg_comment
prg_str += "prg_version = \"%s\"\n"%PRG_FILE_VERSION
prg_str += "def program(prg, cmd):\n"
for item in prg_list:
line = " prg.add(%d, \"%s\""%(int(item["time"]), str(item["name"]))
if len(item["vars"]) > 0:
var_items = []
for var_n in item["vars"]:
if var_n in item["var_formats"]:
type_fmt = item["var_formats"][var_n]
else:
type_fmt = "%s"
if var_n != "action_name":
fmt = self.fmt_to_type(type_fmt)
var_items.append([var_n, type_fmt%fmt(item["vars"][var_n])])
if len(var_items) != 1:
var_l = [itm[0]+"="+itm[1] for itm in var_items]
else:
var_l = [var_items[0][1]]
line += ", " + ", ".join(var_l)
f_items = item["functions"].items()
f_items = [it for it in f_items[:] if re.match("^\s*x?\s*$", it[1]) is None]
if len(f_items) > 0:
line += ", functions=dict("
for f_it in f_items:
line += f_it[0] + "=lambda x: " + f_it[1] + ", "
line = line[:-2]
if not item["funct_enable"]:
line += ", funct_enable=False"
line += ")"
if not item["enable"]:
line += ", enable=False"
line += ")\n"
prg_str += line
prg_str += " return prg\n"
cmd_str_init = [l for l in cmd_str[0].splitlines() if l]
cmd_str_loop = [l for l in cmd_str[1].splitlines() if l]
if len(cmd_str_loop)+len(cmd_str_init) > 0:
prg_str += "def commands(cmd):\n"
for l in cmd_str_init:
prg_str += " "+l+"\n"
if len(cmd_str_loop) > 0:
prg_str += " while(cmd.running):\n"
for l in cmd_str_loop:
prg_str += " "+l+"\n"
prg_str += " return cmd\n"
path, fname = self.get_program_path(prg_name, categories)
fname_path = os.path.join(path, fname)
print "writing '%s'"%fname_path
if not os.path.exists(path):
os.makedirs(path)
with open(fname_path, "w") as fid:
fid.write(prg_str)
self.system.init_actions()
def get_program_path(self, prg_name, categories):
return os.path.join(self.programs_folder, *categories), prg_name+".py"
def get_programs_dirs(self):
if os.path.isdir(self.programs_folder):
directs = []
for fold, _, _ in os.walk(self.programs_folder):
if self.programs_folder in fold:
directs.append(fold.replace(self.programs_folder, "", 1).strip("/"))
return directs
else:
print "ERROR: '%s' folder not found"%self.programs_folder
return
def fmt_to_type(self, fmt):
if "f" in fmt: typ = float
elif "d" in fmt: typ = int
elif "s" in fmt: typ = str
else:
typ = str
print "WARNING: unrecognized format \"%s\""%fmt
return typ
def get_actions_dict(self, only_prg=False):
if only_prg:
action_list = self.system.action_list.programs
else:
action_list = self.system.action_list.tot_list()
act_dict = dict()
for act_name in action_list:
act = self.system.action_list.get_dict(act_name)
act_dict[act_name] = act
return act_dict
def get_ramp_acts(self, action_name, *args, **kwargs):
prg = lib_program.Program(self.system, "")
prg.add(0, action_name, *args, **kwargs)
instructions = prg.get_all_instructions(only_enabled=False)
lst = []
for instr in instructions:
act_dict = self.system.action_list.get_dict(instr.action.name)
for uv in act_dict["vars"]:
act_dict["vars"][uv] = getattr(instr.action, uv)
act_dict["enable"] = instr.enable
act_dict["time"] = instr.time_clock
lst.append(act_dict)
return lst
| gpl-3.0 | -3,721,115,896,549,885,400 | 42.309589 | 118 | 0.441802 | false |
doron2402/CameraControlXYRoboticArm | PythonTemp/huoghTransform.py | 1 | 1195 | import cv2
import numpy as np
img = cv2.imread('image4_with_sample.jpg',0)
img1 = cv2.medianBlur(img,5)
cv2.imwrite('image4_gray_and_blur.jpg',img1)
#circles = cv2.HoughCircles(img1,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=0,maxRadius=0)
circles = cv2.HoughCircles(img1, cv2.HOUGH_GRADIENT, 1.2, 100)
# 'output' is drawn on below but was never defined in the original script;
# assuming the intended canvas is a colour copy of the input image:
output = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
# ensure at least some circles were found
if circles is not None:
# convert the (x, y) coordinates and radius of the circles to integers
circles = np.round(circles[0, :]).astype("int")
# loop over the (x, y) coordinates and radius of the circles
for (x, y, r) in circles:
# draw the circle in the output image, then draw a rectangle
# corresponding to the center of the circle
cv2.circle(output, (x, y), r, (0, 255, 0), 4)
cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
print (x)
print (y)
print (r)
else:
print ("Circle not found!")
#circles = np.uint16(np.around(circles))
#for i in circles[0,:]:
# draw the outer circle
# cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
#cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
#cv2.imread('detected_circles.jpg',cimg)
| mit | -4,046,292,917,451,260,400 | 29.641026 | 101 | 0.654393 | false |
chichan01/xbob.measure.Bootstraps | setup.py | 1 | 2723 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Chi ho CHAN <[email protected]>
# Wed Jan 29 16:29 CEST 2014
# This file contains the python (distutils/setuptools) instructions so your
# package can be installed on **any** host system. It defines some basic
# information like the package name for instance, or its homepage.
#
# It also defines which other packages this python package depends on and that
# are required for this package's operation. The python subsystem will make
# sure all dependent packages are installed or will install them for you upon
# the installation of this package.
#
# The 'buildout' system we use here will go further and wrap this package in
# such a way to create an isolated python working environment. Buildout will
# make sure that dependencies which are not yet installed do get installed, but
# **without** requiring administrative privileges on the host system. This
# allows you to test your package with new python dependencies w/o requiring
# administrative interventions.
from setuptools import setup, find_packages
# The only thing we do in this file is to call the setup() function with all
# parameters that define our package.
setup(
# This is the basic information about your project. Modify all this
# information before releasing code publicly.
name='xbob.measure.Bootstraps',
version='0.0.1',
description='The FAR-FRR confidence interval of the DET curve',
url='http://pypi.python.org/pypi/xbob.measure.Bootstraps',
license='GPLv3',
author='Chi Ho CHAN',
author_email='[email protected]',
long_description=open('README.rst').read(),
# This line is required for any distutils based packaging.
packages=find_packages(),
include_package_data=True,
zip_safe=True,
# This line defines which packages should be installed when you "install"
# this package. All packages that are mentioned here, but are not installed
# on the current system will be installed locally and only visible to the
# scripts of this package. Don't worry - You won't need administrative
# privileges when using buildout.
install_requires=[
"setuptools",
"bob >= 1.3.0a0", # base signal proc./machine learning library
],
# This entry defines which scripts you will have inside the 'bin' directory
# once you install the package (or run 'bin/buildout'). The order of each
# entry under 'console_scripts' is like this:
# script-name-at-bin-directory = module.at.your.library:function
entry_points={
# bob unittest declaration
'bob.test': [
'Bootstraps = xbob.measure.Bootstraps.test_error:ErrorTest',
],
},
)
| gpl-3.0 | 2,788,399,006,084,333,000 | 39.044118 | 84 | 0.718325 | false |
stadtgestalten/stadtgestalten | grouprise/features/memberships/urls.py | 1 | 1599 | from django.conf import urls
from django.urls import path
from grouprise.features.memberships import views
from grouprise.features.memberships.views import Join
urlpatterns = [
path('<slug:group>/actions/join', Join.as_view(), name='join'),
urls.url(
r'^stadt/groups/join/confirm/(?P<secret_key>[a-z0-9]+)/$',
views.JoinConfirm.as_view(),
name='join-confirm'
),
urls.url(
r'^(?P<group_slug>[\w-]+)/join/request/$',
views.JoinRequest.as_view(),
name='join-request'
),
urls.url(
r'^stadt/groups/(?P<group_pk>[0-9]+)/members/$',
views.Members.as_view(),
name='members'),
urls.url(
r'^stadt/groups/(?P<group_pk>[0-9]+)/members/add/$',
views.MemberAdd.as_view(),
name='member-create'),
urls.url(
r'^stadt/groups/(?P<group_pk>[0-9]+)/resign/$',
views.Resign.as_view(),
name='resign'
),
urls.url(
r'^stadt/groups/resign/confirm/(?P<secret_key>[a-z0-9]+)/$',
views.ResignConfirm.as_view(),
name='resign-confirm'
),
urls.url(
r'^stadt/groups/(?P<group_pk>[0-9]+)/resign/request/$',
views.ResignRequest.as_view(),
name='resign-request'
),
urls.url(
r'^stadt/associations/(?P<association_pk>[0-9]+)/apply/$',
views.Apply.as_view(),
name='create-membership-application'),
urls.url(
r'^stadt/memberships/applications/(?P<application_pk>[0-9]+)/accept/$',
views.AcceptApplication.as_view(),
name='accept-membership-application'),
]
| agpl-3.0 | -6,415,189,586,337,832,000 | 29.169811 | 79 | 0.575985 | false |
dmach/dnf | dnf/conf/config.py | 1 | 18521 | # dnf configuration classes.
#
# Copyright (C) 2016-2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.yum import misc
from dnf.i18n import ucd, _
from dnf.pycomp import basestring
import fnmatch
import dnf.conf.substitutions
import dnf.const
import dnf.exceptions
import dnf.pycomp
import dnf.util
import hawkey
import logging
import os
import libdnf.conf
PRIO_EMPTY = libdnf.conf.Option.Priority_EMPTY
PRIO_DEFAULT = libdnf.conf.Option.Priority_DEFAULT
PRIO_MAINCONFIG = libdnf.conf.Option.Priority_MAINCONFIG
PRIO_AUTOMATICCONFIG = libdnf.conf.Option.Priority_AUTOMATICCONFIG
PRIO_REPOCONFIG = libdnf.conf.Option.Priority_REPOCONFIG
PRIO_PLUGINDEFAULT = libdnf.conf.Option.Priority_PLUGINDEFAULT
PRIO_PLUGINCONFIG = libdnf.conf.Option.Priority_PLUGINCONFIG
PRIO_COMMANDLINE = libdnf.conf.Option.Priority_COMMANDLINE
PRIO_RUNTIME = libdnf.conf.Option.Priority_RUNTIME
logger = logging.getLogger('dnf')
class BaseConfig(object):
"""Base class for storing configuration definitions.
Subclass when creating your own definitions.
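    For example (an illustrative sketch, not an actual dnf class)::
        class MyPluginConf(BaseConfig):
            def __init__(self, config, section='my_plugin', parser=None):
                super(MyPluginConf, self).__init__(config, section, parser)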
"""
def __init__(self, config=None, section=None, parser=None):
self.__dict__["_config"] = config
self._section = section
def __getattr__(self, name):
if "_config" not in self.__dict__:
raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__, name))
option = getattr(self._config, name)
if option is None:
return None
try:
value = option().getValue()
except Exception as ex:
return None
if isinstance(value, str):
return ucd(value)
return value
def __setattr__(self, name, value):
option = getattr(self._config, name, None)
if option is None:
# unknown config option, store to BaseConfig only
return super(BaseConfig, self).__setattr__(name, value)
self._set_value(name, value, PRIO_RUNTIME)
def __str__(self):
out = []
out.append('[%s]' % self._section)
if self._config:
for optBind in self._config.optBinds():
try:
value = optBind.second.getValueString()
except RuntimeError:
value = ""
out.append('%s: %s' % (optBind.first, value))
return '\n'.join(out)
def _has_option(self, name):
method = getattr(self._config, name, None)
return method is not None
def _get_value(self, name):
method = getattr(self._config, name, None)
if method is None:
return None
return method().getValue()
def _get_priority(self, name):
method = getattr(self._config, name, None)
if method is None:
return None
return method().getPriority()
def _set_value(self, name, value, priority=PRIO_RUNTIME):
"""Set option's value if priority is equal or higher
        than the current priority."""
method = getattr(self._config, name, None)
if method is None:
            raise Exception("Option \"" + name + "\" does not exist")
option = method()
if value is None:
try:
option.set(priority, value)
except Exception:
pass
else:
try:
if isinstance(value, list) or isinstance(value, tuple):
option.set(priority, libdnf.conf.VectorString(value))
elif (isinstance(option, libdnf.conf.OptionBool)
or isinstance(option, libdnf.conf.OptionChildBool)
) and isinstance(value, int):
option.set(priority, bool(value))
else:
option.set(priority, value)
except RuntimeError as e:
raise dnf.exceptions.ConfigError(_("Error parsing '%s': %s")
% (value, str(e)),
raw_error=str(e))
def _populate(self, parser, section, filename, priority=PRIO_DEFAULT):
"""Set option values from an INI file section."""
if parser.hasSection(section):
for name in parser.options(section):
value = parser.getSubstitutedValue(section, name)
if not value or value == 'None':
value = ''
if hasattr(self._config, name):
try:
self._config.optBinds().at(name).newString(priority, value)
except RuntimeError as e:
logger.debug(_('Unknown configuration value: %s=%s in %s; %s'),
ucd(name), ucd(value), ucd(filename), str(e))
else:
if name == 'arch' and hasattr(self, name):
setattr(self, name, value)
else:
logger.debug(
_('Unknown configuration option: %s = %s in %s'),
ucd(name), ucd(value), ucd(filename))
def dump(self):
# :api
"""Return a string representing the values of all the
configuration options.
"""
output = ['[%s]' % self._section]
if self._config:
for optBind in self._config.optBinds():
# if not opt._is_runtimeonly():
try:
output.append('%s = %s' % (optBind.first, optBind.second.getValueString()))
except RuntimeError:
pass
return '\n'.join(output) + '\n'
@staticmethod
def write_raw_configfile(filename, section_id, substitutions, modify):
# :api
"""
filename - name of config file (.conf or .repo)
section_id - id of modified section (e.g. main, fedora, updates)
substitutions - instance of base.conf.substitutions
modify - dict of modified options
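        Example (illustrative; assumes a dnf.Base() instance named ``base``)::
            BaseConfig.write_raw_configfile('/etc/yum.repos.d/fedora.repo', 'fedora',
                                            base.conf.substitutions, {'enabled': '0'})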
"""
parser = libdnf.conf.ConfigParser()
parser.read(filename)
# b/c repoids can have $values in them we need to map both ways to figure
# out which one is which
if not parser.hasSection(section_id):
for sect in parser.getData():
if libdnf.conf.ConfigParser.substitute(sect, substitutions) == section_id:
section_id = sect
for name, value in modify.items():
if isinstance(value, list):
value = ' '.join(value)
parser.setValue(section_id, name, value)
parser.write(filename, False)
class MainConf(BaseConfig):
# :api
"""Configuration option definitions for dnf.conf's [main] section."""
def __init__(self, section='main', parser=None):
# pylint: disable=R0915
config = libdnf.conf.ConfigMain()
super(MainConf, self).__init__(config, section, parser)
self._set_value('pluginpath', [dnf.const.PLUGINPATH], PRIO_DEFAULT)
self._set_value('pluginconfpath', [dnf.const.PLUGINCONFPATH], PRIO_DEFAULT)
self.substitutions = dnf.conf.substitutions.Substitutions()
self.arch = hawkey.detect_arch()
self._config.system_cachedir().set(PRIO_DEFAULT, dnf.const.SYSTEM_CACHEDIR)
# setup different cache and log for non-priviledged users
if dnf.util.am_i_root():
cachedir = dnf.const.SYSTEM_CACHEDIR
logdir = '/var/log'
else:
try:
cachedir = logdir = misc.getCacheDir()
except (IOError, OSError) as e:
msg = _('Could not set cachedir: {}').format(ucd(e))
raise dnf.exceptions.Error(msg)
self._config.cachedir().set(PRIO_DEFAULT, cachedir)
self._config.logdir().set(PRIO_DEFAULT, logdir)
@property
def get_reposdir(self):
# :api
"""Returns the value of reposdir"""
myrepodir = None
# put repo file into first reposdir which exists or create it
for rdir in self._get_value('reposdir'):
if os.path.exists(rdir):
myrepodir = rdir
break
if not myrepodir:
myrepodir = self._get_value('reposdir')[0]
dnf.util.ensure_dir(myrepodir)
return myrepodir
def _search_inside_installroot(self, optname):
prio = self._get_priority(optname)
# dont modify paths specified on commandline
if prio >= PRIO_COMMANDLINE:
return
val = self._get_value(optname)
# if it exists inside installroot use it (i.e. adjust configuration)
# for lists any component counts
if not isinstance(val, str):
if any(os.path.exists(os.path.join(self._get_value('installroot'),
p.lstrip('/'))) for p in val):
self._set_value(
optname,
libdnf.conf.VectorString([self._prepend_installroot_path(p) for p in val]),
prio
)
elif os.path.exists(os.path.join(self._get_value('installroot'), val.lstrip('/'))):
self._set_value(optname, self._prepend_installroot_path(val), prio)
def prepend_installroot(self, optname):
# :api
prio = self._get_priority(optname)
new_path = self._prepend_installroot_path(self._get_value(optname))
self._set_value(optname, new_path, prio)
def _prepend_installroot_path(self, path):
root_path = os.path.join(self._get_value('installroot'), path.lstrip('/'))
return libdnf.conf.ConfigParser.substitute(root_path, self.substitutions)
def _configure_from_options(self, opts):
"""Configure parts of CLI from the opts """
config_args = ['plugins', 'version', 'config_file_path',
'debuglevel', 'errorlevel', 'installroot',
'best', 'assumeyes', 'assumeno', 'clean_requirements_on_remove', 'gpgcheck',
'showdupesfromrepos', 'plugins', 'ip_resolve',
'rpmverbosity', 'disable_excludes', 'color',
'downloadonly', 'exclude', 'excludepkgs', 'skip_broken',
'tsflags', 'arch', 'basearch', 'ignorearch', 'cacheonly', 'comment']
for name in config_args:
value = getattr(opts, name, None)
if value is not None and value != []:
if self._has_option(name):
appendValue = False
if self._config:
try:
appendValue = self._config.optBinds().at(name).getAddValue()
except RuntimeError:
# fails if option with "name" does not exist in _config (libdnf)
pass
if appendValue:
add_priority = dnf.conf.PRIO_COMMANDLINE
if add_priority < self._get_priority(name):
add_priority = self._get_priority(name)
for item in value:
if item:
self._set_value(name, self._get_value(name) + [item], add_priority)
else:
self._set_value(name, [], dnf.conf.PRIO_COMMANDLINE)
else:
self._set_value(name, value, dnf.conf.PRIO_COMMANDLINE)
elif hasattr(self, name):
setattr(self, name, value)
else:
logger.warning(_('Unknown configuration option: %s = %s'),
ucd(name), ucd(value))
if getattr(opts, 'gpgcheck', None) is False:
self._set_value("localpkg_gpgcheck", False, dnf.conf.PRIO_COMMANDLINE)
if hasattr(opts, 'main_setopts'):
# now set all the non-first-start opts from main from our setopts
# pylint: disable=W0212
for name, values in opts.main_setopts.items():
for val in values:
if hasattr(self._config, name):
try:
# values in main_setopts are strings, try to parse it using newString()
self._config.optBinds().at(name).newString(PRIO_COMMANDLINE, val)
except RuntimeError as e:
raise dnf.exceptions.ConfigError(
_("Error parsing --setopt with key '%s', value '%s': %s")
% (name, val, str(e)), raw_error=str(e))
else:
# if config option with "name" doesn't exist in _config, it could be defined
# only in Python layer
if hasattr(self, name):
setattr(self, name, val)
else:
msg = _("Main config did not have a %s attr. before setopt")
logger.warning(msg, name)
def exclude_pkgs(self, pkgs):
# :api
name = "excludepkgs"
if pkgs is not None and pkgs != []:
if self._has_option(name):
self._set_value(name, pkgs, dnf.conf.PRIO_COMMANDLINE)
else:
logger.warning(_('Unknown configuration option: %s = %s'),
ucd(name), ucd(pkgs))
def _adjust_conf_options(self):
"""Adjust conf options interactions"""
skip_broken_val = self._get_value('skip_broken')
if skip_broken_val:
self._set_value('strict', not skip_broken_val, self._get_priority('skip_broken'))
@property
def releasever(self):
# :api
return self.substitutions.get('releasever')
@releasever.setter
def releasever(self, val):
# :api
if val is None:
self.substitutions.pop('releasever', None)
return
self.substitutions['releasever'] = str(val)
@property
def arch(self):
# :api
return self.substitutions.get('arch')
@arch.setter
def arch(self, val):
# :api
if val is None:
self.substitutions.pop('arch', None)
return
if val not in dnf.rpm._BASEARCH_MAP.keys():
msg = _('Incorrect or unknown "{}": {}')
raise dnf.exceptions.Error(msg.format("arch", val))
self.substitutions['arch'] = val
self.basearch = dnf.rpm.basearch(val)
@property
def basearch(self):
# :api
return self.substitutions.get('basearch')
@basearch.setter
def basearch(self, val):
# :api
if val is None:
self.substitutions.pop('basearch', None)
return
if val not in dnf.rpm._BASEARCH_MAP.values():
msg = _('Incorrect or unknown "{}": {}')
raise dnf.exceptions.Error(msg.format("basearch", val))
self.substitutions['basearch'] = val
def read(self, filename=None, priority=PRIO_DEFAULT):
# :api
if filename is None:
filename = self._get_value('config_file_path')
parser = libdnf.conf.ConfigParser()
try:
parser.read(filename)
except RuntimeError as e:
raise dnf.exceptions.ConfigError(_('Parsing file "%s" failed: %s') % (filename, e))
except IOError as e:
logger.warning(e)
self._populate(parser, self._section, filename, priority)
# update to where we read the file from
self._set_value('config_file_path', filename, priority)
@property
def verbose(self):
return self._get_value('debuglevel') >= dnf.const.VERBOSE_LEVEL
class RepoConf(BaseConfig):
"""Option definitions for repository INI file sections."""
def __init__(self, parent, section=None, parser=None):
super(RepoConf, self).__init__(libdnf.conf.ConfigRepo(
parent._config if parent else libdnf.conf.ConfigMain()), section, parser)
self._masterConfig = parent._config if parent else libdnf.conf.ConfigMain()
def _configure_from_options(self, opts):
"""Configure repos from the opts. """
if getattr(opts, 'gpgcheck', None) is False:
for optname in ['gpgcheck', 'repo_gpgcheck']:
self._set_value(optname, False, dnf.conf.PRIO_COMMANDLINE)
repo_setopts = getattr(opts, 'repo_setopts', {})
for repoid, setopts in repo_setopts.items():
if not fnmatch.fnmatch(self._section, repoid):
continue
for name, values in setopts.items():
for val in values:
if hasattr(self._config, name):
try:
# values in repo_setopts are strings, try to parse it using newString()
self._config.optBinds().at(name).newString(PRIO_COMMANDLINE, val)
except RuntimeError as e:
raise dnf.exceptions.ConfigError(
_("Error parsing --setopt with key '%s.%s', value '%s': %s")
% (self._section, name, val, str(e)), raw_error=str(e))
else:
msg = _("Repo %s did not have a %s attr. before setopt")
logger.warning(msg, self._section, name)
| gpl-2.0 | -6,741,883,206,483,219,000 | 39.616228 | 100 | 0.553696 | false |
edx/course-discovery | course_discovery/apps/api/filters.py | 1 | 7646 | import datetime
import logging
import pytz
from django.contrib.auth import get_user_model
from django.db.models import Q, QuerySet
from django.utils.translation import ugettext as _
from django_filters import rest_framework as filters
from dry_rest_permissions.generics import DRYPermissionFiltersBase
from guardian.shortcuts import get_objects_for_user
from rest_framework.exceptions import NotFound, PermissionDenied
from course_discovery.apps.api.utils import cast2int
from course_discovery.apps.course_metadata.choices import CourseRunStatus, ProgramStatus
from course_discovery.apps.course_metadata.models import (
Course, CourseEditor, CourseRun, LevelType, Organization, Person, Program, ProgramType, Subject, Topic
)
logger = logging.getLogger(__name__)
User = get_user_model()
class PermissionsFilter(DRYPermissionFiltersBase):
def filter_list_queryset(self, request, queryset, view):
""" Filters the list queryset, returning only the objects accessible by the user.
        If a username parameter is passed on the querystring, the filter will return objects accessible by
the user corresponding to the given username. NOTE: This functionality is only accessible to staff users.
Raises:
PermissionDenied -- If a username querystring parameter is specified, but the user is not a staff user.
Http404 -- If no User corresponding to the given username exists.
Returns:
QuerySet
"""
perm = queryset.model.get_permission('view')
user = request.user
username = request.query_params.get('username', None)
if username:
if request.user.is_staff:
try:
user = User.objects.get(username=username)
except User.DoesNotExist as not_found:
raise NotFound(
_('No user with the username [{username}] exists.').format(username=username)) from not_found
else:
raise PermissionDenied(
_('Only staff users are permitted to filter by username. Remove the username parameter.')
)
return get_objects_for_user(user, perm)
class CharListFilter(filters.CharFilter):
""" Filters a field via a comma-delimited list of values. """
def filter(self, qs, value):
if value not in (None, ''):
value = value.split(',')
return super().filter(qs, value)
class UUIDListFilter(CharListFilter):
""" Filters a field via a comma-delimited list of UUIDs. """
def __init__(self, field_name='uuid', label=None, widget=None, method=None, lookup_expr='in', required=False,
distinct=False, exclude=False, **kwargs):
super().__init__(field_name=field_name, label=label, widget=widget, method=method, lookup_expr=lookup_expr,
required=required, distinct=distinct, exclude=exclude, **kwargs)
class FilterSetMixin:
def _apply_filter(self, name, queryset, value):
return getattr(queryset, name)() if cast2int(value, name) else queryset
def filter_active(self, queryset, name, value):
return self._apply_filter(name, queryset, value)
def filter_marketable(self, queryset, name, value):
return self._apply_filter(name, queryset, value)
class CourseFilter(filters.FilterSet):
keys = CharListFilter(field_name='key', lookup_expr='in')
uuids = UUIDListFilter()
course_run_statuses = CharListFilter(method='filter_by_course_run_statuses')
editors = CharListFilter(field_name='editors__user__pk', lookup_expr='in', distinct=True)
class Meta:
model = Course
fields = ('keys', 'uuids',)
def filter_by_course_run_statuses(self, queryset, _, value):
statuses = set(value.split(','))
or_queries = [] # a list of Q() expressions to add to our filter as alternatives to status check
if 'in_review' in statuses: # any of our review statuses
statuses.remove('in_review')
statuses.add(CourseRunStatus.LegalReview)
statuses.add(CourseRunStatus.InternalReview)
if 'unsubmitted' in statuses: # unpublished and unarchived
statuses.remove('unsubmitted')
# "is not archived" logic stolen from CourseRun.has_ended
now = datetime.datetime.now(pytz.UTC)
or_queries.append(Q(course_runs__status=CourseRunStatus.Unpublished) & ~Q(course_runs__end__lt=now))
status_check = Q(course_runs__status__in=statuses)
for query in or_queries:
status_check |= query
return queryset.filter(status_check, course_runs__hidden=False).distinct()
class CourseRunFilter(FilterSetMixin, filters.FilterSet):
active = filters.BooleanFilter(method='filter_active')
marketable = filters.BooleanFilter(method='filter_marketable')
keys = CharListFilter(field_name='key', lookup_expr='in')
license = filters.CharFilter(field_name='license', lookup_expr='iexact')
@property
def qs(self):
# This endpoint supports query via Haystack. If that form of filtering is active,
# do not attempt to treat the queryset as a normal Django queryset.
if not isinstance(self.queryset, QuerySet):
return self.queryset
return super().qs
class Meta:
model = CourseRun
fields = ('keys', 'hidden', 'license',)
class ProgramFilter(FilterSetMixin, filters.FilterSet):
marketable = filters.BooleanFilter(method='filter_marketable')
status = filters.MultipleChoiceFilter(choices=ProgramStatus.choices)
type = filters.CharFilter(field_name='type__translations__name_t', lookup_expr='iexact')
types = CharListFilter(field_name='type__slug', lookup_expr='in')
uuids = UUIDListFilter()
class Meta:
model = Program
fields = ('hidden', 'marketable', 'marketing_slug', 'status', 'type', 'types',)
class ProgramTypeFilter(filters.FilterSet):
language_code = filters.CharFilter(method='_set_language')
def _set_language(self, queryset, _, language_code):
return queryset.language(language_code)
class Meta:
model = ProgramType
fields = ('language_code',)
class LevelTypeFilter(filters.FilterSet):
language_code = filters.CharFilter(method='_set_language')
def _set_language(self, queryset, _, language_code):
return queryset.language(language_code)
class Meta:
model = LevelType
fields = ('language_code',)
class OrganizationFilter(filters.FilterSet):
tags = CharListFilter(field_name='tags__name', lookup_expr='in')
uuids = UUIDListFilter()
class Meta:
model = Organization
fields = ('tags', 'uuids',)
class PersonFilter(filters.FilterSet):
class Meta:
model = Person
fields = ('slug',)
class SubjectFilter(filters.FilterSet):
language_code = filters.CharFilter(method='_set_language')
def _set_language(self, queryset, _, language_code):
return queryset.language(language_code)
class Meta:
model = Subject
fields = ('slug', 'language_code')
class TopicFilter(filters.FilterSet):
language_code = filters.CharFilter(method='_set_language')
def _set_language(self, queryset, _, language_code):
return queryset.language(language_code)
class Meta:
model = Topic
fields = ('slug', 'language_code')
class CourseEditorFilter(filters.FilterSet):
course = filters.CharFilter(field_name='course__uuid')
class Meta:
model = CourseEditor
fields = ('course',)
| agpl-3.0 | -7,183,579,204,389,890,000 | 34.896714 | 117 | 0.670154 | false |
oerdnj/json-glib | build/win32/gen-file-list-jsonglib.py | 1 | 3552 | #!/usr/bin/python
# vim: encoding=utf-8
# Generate the file lists for processing with g-ir-scanner
import os
import sys
import re
import string
import subprocess
import optparse
def gen_jsonglib_filelist(srcroot, subdir, dest):
vars = read_vars_from_AM(os.path.join(srcroot, subdir, 'Makefile.am'),
vars = {'srcdir':'../json-glib',
'top_srcdir':'..'},
conds = {'HAVE_INTROSPECTION':True},
filters = ['Json_1_0_gir_FILES'])
files = vars['Json_1_0_gir_FILES'].split()
if (srcroot == '..\\..'):
relative_srcdir = '..'
else:
        relative_srcdir = srcroot
with open(dest, 'w') as d:
for i in files:
d.write(relative_srcdir + '\\' + i.replace('/', '\\') + '\n')
def read_vars_from_AM(path, vars = {}, conds = {}, filters = None):
'''
path: path to the Makefile.am
vars: predefined variables
conds: condition variables for Makefile
filters: if None, all variables defined are returned,
otherwise, it is a list contains that variables should be returned
'''
cur_vars = vars.copy()
RE_AM_VAR_REF = re.compile(r'\$\((\w+?)\)')
RE_AM_VAR = re.compile(r'^\s*(\w+)\s*=(.*)$')
RE_AM_INCLUDE = re.compile(r'^\s*include\s+(\w+)')
RE_AM_CONTINUING = re.compile(r'\\\s*$')
RE_AM_IF = re.compile(r'^\s*if\s+(\w+)')
RE_AM_ELSE = re.compile(r'^\s*else')
RE_AM_ENDIF = re.compile(r'^\s*endif')
def am_eval(cont):
return RE_AM_VAR_REF.sub(lambda x: cur_vars.get(x.group(1), ''), cont)
with open(path, 'r') as f:
contents = f.readlines()
#combine continuing lines
i = 0
ncont = []
while i < len(contents):
line = contents[i]
if RE_AM_CONTINUING.search(line):
line = RE_AM_CONTINUING.sub('', line)
j = i + 1
while j < len(contents) and RE_AM_CONTINUING.search(contents[j]):
line += RE_AM_CONTINUING.sub('', contents[j])
j += 1
else:
if j < len(contents):
line += contents[j]
i = j
else:
i += 1
ncont.append(line)
#include, var define, var evaluation
i = -1
skip = False
oldskip = []
while i < len(ncont) - 1:
i += 1
line = ncont[i]
mo = RE_AM_IF.search(line)
if mo:
oldskip.append(skip)
skip = False if mo.group(1) in conds and conds[mo.group(1)] \
else True
continue
mo = RE_AM_ELSE.search(line)
if mo:
skip = not skip
continue
mo = RE_AM_ENDIF.search(line)
if mo:
skip = oldskip.pop()
continue
if not skip:
mo = RE_AM_INCLUDE.search(line)
if mo:
cur_vars.update(read_vars_from_AM(am_eval(mo.group(1)), cur_vars, conds, None))
continue
mo = RE_AM_VAR.search(line)
if mo:
cur_vars[mo.group(1)] = am_eval(mo.group(2).strip())
continue
#filter:
if filters != None:
ret = {}
for i in filters:
ret[i] = cur_vars.get(i, '')
return ret
else:
return cur_vars
def main(argv):
srcroot = '..\\..'
subdir = 'json-glib'
gen_jsonglib_filelist(srcroot, subdir, 'json_list')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| lgpl-2.1 | 9,128,459,780,183,954,000 | 30.157895 | 95 | 0.50366 | false |
postalXdude/PySplash | py_splash/driver.py | 1 | 10769 | import json
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
from py_splash.static import (
LUA_SOURCE,
GET_HTML_ONLY,
GET_ALL_DATA,
RETURN_HTML_ONLY,
RETURN_ALL_DATA,
PREPARE_COOKIES,
JS_PIECE,
SET_PROXY,
USER_AGENT,
GO
)
from py_splash.exceptions import (
SplashTimeoutError,
SplashInternalError,
SplashRenderError,
SplashUnsupportedContentTypeError,
SplashBadRequestError,
SplashSyntaxError
)
class Driver(object):
def __init__(self, splash_url='http://127.0.0.1:8050', user_agent=None,
proxy=None, proxy_user_pass=None, proxy_type=None):
"""
:param splash_url: Url to target running splash container. It can be on local or external machine.
Defaults to local machine.
:param user_agent: Sets user agent in the headers. It must be string.
(optional) It is used until this object cease to exists.
:param proxy: Proxy server that will be used by splash ('example.com:8080').
(optional)
:param proxy_user_pass: If the proxy server requires authentication, send username and password in this
(optional) format - 'user:pass'. If there is no password - 'user:'.
        :param proxy_type:       Type of proxy server. It can be 'HTTP' or 'SOCKS5'. This param is case-insensitive.
(optional) It can be 'http' or 'HtTp'. Defaults to 'HTTP'.
"""
self.splash_url = '{}/execute'.format(splash_url)
self.user_agent = user_agent
self.proxy = proxy
self.proxy_user_pass = proxy_user_pass
self.proxy_type = proxy_type
def load_url(self, url=None, condition='no_condition', timeout=20, wait=0.5, backup_wait=None,
post=None, cookies=None, headers=None, full_info=False):
"""
:param url: Url for splash to target desired resource.
:param condition: List of xpath expressions ["//td[@class='splash']", etc.] on which splash will wait.
(optional) Or it can be custom js code. It needs to return True or False.
If never fulfilled, timeout occurs.
If not set, defaults to True.
:param timeout: Amount of time in seconds, until splash stops loading page and throws timeout error.
:param wait: Amount of time in seconds, for how long will splash wait and
check if condition is fulfilled.
:param backup_wait: If condition is fulfilled, and data is still not there (Tested this with really slow
(optional) proxies) use this param to add extra seconds to wait after condition is fulfilled.
:param post: Post data to be sent for POST request. Dictionary {'user': 'bla', 'pass': 'bla'}.
(optional) Or it can be just JSON string or any other string format. In this case headers must be
set up to match string type. If JSON - headers={["content-type"]="application/json"}, etc.
:param cookies: Custom cookies in form of dictionary that will be used in request.
(optional)
:param headers: Custom headers in form of dictionary that will be used in request.
(optional)
:param full_info: If set to True, extra data will be returned in form of JSON that contains:
(optional) html, cookies, headers, current url, and status code.
:returns: Generates url that will be sent to splash. When request is made with generated url,
there are three possible responses: Html page, full info(see above) or error.
"""
prepared_data = self._prepare_data_for_request(post, headers, cookies)
condition_piece = JS_PIECE
if isinstance(condition, list) and condition:
condition_source = [condition_piece.format(xpath.replace('[', '\\[').replace(']', '\\]')).strip('\n')
for xpath in condition]
condition_source = ' '.join(condition_source)[:-22]
elif isinstance(condition, str) and condition:
if condition == 'no_condition':
condition_source = 'return true;'
else:
condition_pieces = condition.split('\n')
condition_pieces = [piece.strip() for piece in condition_pieces]
condition_source = ' '.join(condition_pieces).replace("'", "\\'")
else:
raise ValueError("Function must receive a list of xpath expressions or custom js code!")
js_start = 'document.evaluate(' if isinstance(condition, list) else '(function(){'
js_end = '' if isinstance(condition, list) else '})();'
lua_source = LUA_SOURCE.format(
prepared_data,
'\'' if isinstance(condition, str) else '[[',
'{} {} {}'.format(js_start, condition_source, js_end),
'\'' if isinstance(condition, str) else ']]',
'\tsplash:wait({})'.format(backup_wait) if backup_wait else '',
GET_ALL_DATA if full_info else GET_HTML_ONLY,
RETURN_ALL_DATA if full_info else RETURN_HTML_ONLY
)
return '{}?lua_source={}&url={}&timeout={}&wait={}'.format(
self.splash_url,
quote_plus(lua_source),
quote_plus(url),
quote_plus(str(timeout)),
quote_plus(str(wait))
)
def _prepare_data_for_request(self, post, headers, cookies, images_enabled=False):
prepared_data = []
form_data = True
if images_enabled:
prepared_data.append('\tsplash.images_enabled = true\n')
else:
prepared_data.append('\tsplash.images_enabled = false\n')
if self.proxy:
proxy_init = []
host = self.proxy[:self.proxy.rfind(':')]
port = self.proxy[self.proxy.rfind(':') + 1:]
proxy_init.append('{}host = \'{}\',\n{}port = {},'.format('\t' * 3, host, '\t' * 3, port))
if self.proxy_user_pass:
username = self.proxy_user_pass[:self.proxy_user_pass.find(':')]
password = self.proxy_user_pass[self.proxy_user_pass.find(':') + 1:]
proxy_init.append('{}username = \'{}\',\n{}password = \'{}\','.format(
'\t' * 3, username.replace("'", "\\'"),
'\t' * 3, password.replace("'", "\\'")
))
if self.proxy_type:
proxy_init.append('{}type = "{}",'.format('\t' * 3, self.proxy_type.upper()))
proxy_init[-1] = proxy_init[-1].rstrip(',')
prepared_data.append(SET_PROXY.format('{', '\n'.join(proxy_init), '}'))
if self.user_agent:
prepared_data.append(USER_AGENT.format(self.user_agent.replace("'", "\\'")))
if isinstance(post, dict) and post:
post = Driver._prepare_lua_table('post', post)
prepared_data.append(post)
elif isinstance(post, str) and post:
form_data = False
body = '''
local body = '{}'
'''.format(post.replace("'", "\\'"))
prepared_data.append(body)
if isinstance(headers, dict) and headers:
headers = Driver._prepare_lua_table('headers', headers)
prepared_data.append(headers)
if isinstance(cookies, dict) and cookies:
table_values = ["{}{}name='{}', value='{}'{},".format(
'\t' * 2,
'{',
name.replace("'", "\\'"),
str(value).replace("'", "\\'") if value else '',
'}'
)
for name, value in cookies.items()]
table_values[-1] = table_values[-1].rstrip(',')
cookies = PREPARE_COOKIES.format('{', '\n'.join(table_values), '}')
prepared_data.append(cookies)
prepared_data.append(GO.format(
'{',
'headers' if headers else 'nil',
'POST' if post else 'GET',
'body' if post and not form_data else 'nil',
'post' if post and form_data else 'nil',
'}'
))
return '\n'.join(prepared_data)
@staticmethod
def _prepare_lua_table(data_type, data):
table_skeleton = '''
local {} = {}
{}
{}
'''
table_values = ["{}['{}'] = '{}',".format(
'\t' * 2,
name.replace("'", "\\'"),
str(value).replace("'", "\\'") if value else '',
)
for name, value in data.items()]
table_values[-1] = table_values[-1].rstrip(',')
return table_skeleton.format(data_type, '{', '\n'.join(table_values), '}')
def error_check(self, response):
"""
:param response: It must be utf-8 based string or unicode
"""
try:
potential_error = json.loads(response)
except ValueError:
potential_error = {}
error_keys = ('info', 'type', 'description', 'error')
error = all(key in error_keys for key in potential_error.keys())
if error and len(potential_error.keys()) == len(error_keys):
if 'Timeout exceeded rendering page' in potential_error.get('description'):
raise SplashTimeoutError('Timeout exceeded rendering page')
elif 'Error rendering page' in potential_error.get('description'):
raise SplashRenderError('Error rendering page')
elif 'Unhandled internal error' in potential_error.get('description'):
raise SplashInternalError('Unhandled internal error')
elif 'Request Content-Type is not supported' in potential_error.get('description'):
raise SplashUnsupportedContentTypeError('Request Content-Type is not supported')
elif 'Error happened while executing Lua script' in potential_error.get('description'):
if potential_error.get('info').get('type', '').strip() == 'LUA_ERROR':
raise SplashBadRequestError(potential_error.get('info').get('error'))
elif potential_error.get('info').get('type', '').strip() == 'LUA_INIT_ERROR':
raise SplashSyntaxError('Lua syntax error')
elif potential_error.get('info').get('type', '').strip() == 'JS_ERROR':
raise SplashSyntaxError('Syntax error in splash condition')
else:
raise NotImplementedError(potential_error.get('info', response))
else:
raise NotImplementedError(response)
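

# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original module).
# It assumes a Splash instance at the default http://127.0.0.1:8050 and that
# the `requests` package is installed; both are assumptions for illustration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import requests

    driver = Driver(user_agent='Mozilla/5.0 (compatible; PySplashSketch)')
    render_url = driver.load_url(
        url='http://example.com',
        condition=["//td[@class='splash']"],  # wait until this xpath matches
        timeout=30,
        wait=0.5,
    )
    response = requests.get(render_url)
    driver.error_check(response.text)  # raises a Splash*Error subclass on failure
    print(response.text[:200])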
| mit | 8,227,596,697,306,968,000 | 42.955102 | 118 | 0.558919 | false |
knipknap/SpiffWorkflow | SpiffWorkflow/serializer/json.py | 1 | 2405 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import json
import uuid
from .dict import DictionarySerializer
from ..operators import Attrib
def object_hook(dct):
if '__uuid__' in dct:
return uuid.UUID(dct['__uuid__'])
if '__bytes__' in dct:
return dct['__bytes__'].encode('ascii')
if '__attrib__' in dct:
return Attrib(dct['__attrib__'])
return dct
def default(obj):
if isinstance(obj, uuid.UUID):
return {'__uuid__': obj.hex}
if isinstance(obj, bytes):
return {'__bytes__': obj.decode('ascii')}
if isinstance(obj, Attrib):
return {'__attrib__': obj.name}
raise TypeError('%r is not JSON serializable' % obj)
def loads(text):
return json.loads(text, object_hook=object_hook)
def dumps(dct):
return json.dumps(dct, sort_keys=True, default=default)
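
# Round-trip sketch (editor's addition): UUIDs, bytes and Attrib operators
# survive dumps()/loads() thanks to the default/object_hook pair above.
#
#   >>> token = uuid.uuid4()
#   >>> text = dumps({'token': token, 'attr': Attrib('status')})
#   >>> restored = loads(text)
#   >>> restored['token'] == token and restored['attr'].name == 'status'
#   True
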
class JSONSerializer(DictionarySerializer):
def serialize_workflow_spec(self, wf_spec, **kwargs):
thedict = super(JSONSerializer, self).serialize_workflow_spec(
wf_spec, **kwargs)
return dumps(thedict)
def deserialize_workflow_spec(self, s_state, **kwargs):
thedict = loads(s_state)
return super(JSONSerializer, self).deserialize_workflow_spec(
thedict, **kwargs)
def serialize_workflow(self, workflow, **kwargs):
thedict = super(JSONSerializer, self).serialize_workflow(
workflow, **kwargs)
return dumps(thedict)
def deserialize_workflow(self, s_state, **kwargs):
thedict = loads(s_state)
return super(JSONSerializer, self).deserialize_workflow(
thedict, **kwargs)
| lgpl-3.0 | 4,921,409,628,840,042,000 | 30.233766 | 70 | 0.672765 | false |
xelphene/nmurl | nmurl/parsedns.py | 1 | 4660 | #!/usr/bin/env python
import re
import sys
class ParseError:
def setDefaults(self):
if self.format==None:
self.format='dig'
class RRSet(object):
"""an RRSet contains one or more resource records of the same name,
    type and class (and possibly TTL; not sure what to do with that yet)."""
def __init__(self, name=None, rclass=None, rtype=None, ttl=0, data=None):
self.name = name
self.rclass = rclass
self.rtype = rtype
self.ttl = ttl
if data==None:
self.data = []
else:
self.data = data
def addData(self, data):
if data not in self.data:
self.data.append(data)
def __iter__(self):
for datum in self.data:
yield RR(
name=self.name,
rclass=self.rclass,
rtype=self.rtype,
ttl=self.ttl,
data=datum)
def __str__(self):
return 'RRSet: %s %s %s' % (self.name, self.rclass, self.rtype)
class RRSetList(object):
"""builds a list of distinct RRSets from RRs."""
def __init__(self):
self.rrsets = {}
self._resolutionsCache = None
def __iter__(self):
for rrset in self.rrsets.values():
yield rrset
def addRR(self, rr):
self._resolutionsCache = None
key = (rr.name,rr.rclass,rr.rtype)
if not self.rrsets.has_key(key):
self.rrsets[key] = RRSet(
name=rr.name,
rtype=rr.rtype,
rclass=rr.rclass )
self.rrsets[key].ttl = rr.ttl # TODO: not sure about this
self.rrsets[key].addData(rr.data)
def names(self):
"""return all domain names used anywhere in this query"""
s = set()
for rrset in self:
n = rrset.name
if n.endswith('.'): n=n[:-1]
s.add(rrset.name)
if rrset.rtype=='CNAME':
for datum in rrset.data:
if datum.endswith('.'):
datum = datum[:-1]
s.add(datum)
return s
def buildResolutions(self):
"""return a dictionary with IP addresses for keys and arrays of
        hostnames that resolve to it (either via an A or CNAME record) as the
corresponding values."""
r={
'reverse': {},
'forward': {}
}
for rrset in self:
if rrset.rtype!='A':
continue
if not r['forward'].has_key(rrset.name):
r['forward'][rrset.name] = set()
for address in rrset.data:
r['forward'][rrset.name].add(address)
if not r['reverse'].has_key(address):
r['reverse'][address] = set()
r['reverse'][address].add(rrset.name)
for rrset in self:
if rrset.rtype!='CNAME':
continue
for datum in rrset.data:
# datum is the CNAME TARGET
if datum.endswith('.'):
datum = datum[:-1]
if r['forward'].has_key(datum):
addrs = r['forward'][datum]
r['forward'][rrset.name] = addrs
for addr in addrs:
r['reverse'][addr].add(rrset.name)
return r
def getResolutions(self):
if self._resolutionsCache:
return self._resolutionsCache
else:
self._resolutionsCache = self.buildResolutions()
return self._resolutionsCache
def namesForAddress(self, addr):
if self.getResolutions()['reverse'].has_key(addr):
return self.getResolutions()['reverse'][addr]
else:
return set([])
def dump(self):
print '--- dns query'
for rrset in self:
print rrset
for rr in rrset:
print ' ',rr.simpleFormat()
print ' res:',self.getResolutions()
class RR(object):
def __init__(self, name=None, rclass=None, rtype=None, ttl=0, data=None):
self.name = name
self.rclass = rclass
self.rtype = rtype
self.ttl = ttl
self.data = data
def simpleFormat(self):
if len(self.data)>30:
data = self.data[0:30]+'...'
else:
data = self.data
return '%-30s %2s %-4s %s' % (
self.name, self.rclass, self.rtype, data)
def fullFormat(self):
return '%-30s %-10d %-2s %-4s %s' % (
self.name, self.ttl, self.rclass, self.rtype, self.data)
def __str__(self):
return 'Fact: %s' % self.simpleFormat()
#return 'Fact: %s %d %s %s %s' % (
# self.name, self.ttl, self.rclass, self.rtype, self.data)
class RRParseError(Exception):
def __init__(self, s, reason):
self.string = s
self.reason = reason
def __str__(self):
return 'failed to parse %s as fact: %s' % (
repr(self.string), self.reason )
class RRParser(object):
re_rr = re.compile('^(\S+)\.\s+([0-9]+)\s+([A-Z]+)\s+([A-Z]+)\s+(.*)$')
@classmethod
def parseString(cls, s):
mg = cls.re_rr.match(s)
if mg==None:
raise RRParseError(s,'invalid format')
return RR(
name = mg.group(1),
rclass = mg.group(3),
rtype = mg.group(4),
ttl = int(mg.group(2)),
data = mg.group(5)
)
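
# Illustrative sketch (editor's addition): RRParser expects one dig-style
# answer line of the form "name. TTL CLASS TYPE data", e.g.
#
#   rr = RRParser.parseString('www.example.com. 300 IN A 93.184.216.34')
#   rrsl = RRSetList()
#   rrsl.addRR(rr)
#   rrsl.namesForAddress('93.184.216.34')   # -> set(['www.example.com'])
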
def parseFile(f, rrsl=None):
lineno=0
if rrsl==None:
rrsl = RRSetList()
for line in f:
lineno+=1
try:
rr = RRParser.parseString(line)
rrsl.addRR(rr)
except RRParseError, rpe:
#print '<failed>',rpe
pass
return rrsl
| gpl-3.0 | 633,926,550,901,782,000 | 21.848039 | 74 | 0.625751 | false |
mivok/bear_backup | bear_backup.py | 1 | 7256 | #!/usr/bin/env python3
import argparse
import datetime
import glob
import json
import os.path
import pathlib
import re
import sqlite3
import subprocess
import sys
import zipfile
# Paths to various files
approot = os.path.expanduser("~/Library/Containers/net.shinyfrog.bear/Data")
dbpath = os.path.join(approot, "Documents/Application Data/database.sqlite")
assetpath = os.path.join(approot, "Documents/Application Data/Local Files")
imagepath = os.path.join(assetpath, "Note Images")
filepath = os.path.join(assetpath, "Note Files")
asset_re = re.compile(r'\[(image|file):([^]]+)\]')
# The epoch for apple timestamps in the bear database is 1 Jan 2001, so we
# need to add the following offset to the timestamps to get a unix timestamp
apple_epoch = 978307200
class Note(object):
def __init__(self, db, note_id):
self.db = db
self.note_data = self.db.execute("SELECT * FROM ZSFNOTE WHERE Z_PK=?",
(note_id,)).fetchone()
def title(self):
return self.note_data["ZTITLE"]
def text(self):
return self.note_data["ZTEXT"]
def last_modified(self):
return datetime.datetime.fromtimestamp(
self.note_data["ZMODIFICATIONDATE"] + apple_epoch)
def text_with_converted_asset_paths(self):
"""Returns the note text, but with any asset paths changed to point to
the textbundle location.
In addition, the image/file prefixes to the image path are removed
too, because in an exported bearnote file it's just
[assets/filename.ext]
"""
return re.sub(asset_re,
lambda m: "[%s]" % (self.convert_asset_path(m[2])),
self.text())
def convert_asset_path(self, filename):
"""Strips any path to an asset and replaces it with assets/ for use in
textbundles"""
return re.sub(r'^.*/', 'assets/', filename)
def asset_filenames(self):
filenames = set()
for m in re.findall(asset_re, self.text()):
if m[0] == 'file':
filenames.add(os.path.join(filepath, m[1]))
elif m[0] == 'image':
filenames.add(os.path.join(imagepath, m[1]))
return filenames
def filename(self):
"""Generates a filename from the note title, without any file
extension"""
filename = self.title()
        # Replace anything that isn't alphanumeric or whitespace with an underscore
filename = re.sub('[^\w\s]+', '_', filename)
# Collapse spaces
filename = re.sub('\s+', ' ', filename)
return filename
def full_filename(self):
"""Gets the full filename of the note on disk, including the .bearnote
extension"""
return pathlib.Path(self.filename()).with_suffix(".bearnote")
def existing_file_is_newer(self):
filename = self.full_filename()
if not filename.exists():
return False
mtime = datetime.datetime.fromtimestamp(filename.stat().st_mtime)
if mtime < self.last_modified():
return False
return True
def zip_note(self, filename=None):
"""Adds the note to a zipfile in bearnote format.
The bearnote format is almost identical to the textbundle format,
except that asset (image and pdf) links aren't markdown images,
they're just `[path/to/file]` (without backticks)
"""
if filename is None:
filename = self.filename()
filename = pathlib.Path(filename).with_suffix(".bearnote")
zip_file = zipfile.ZipFile(str(filename), "w",
compression=zipfile.ZIP_DEFLATED)
# Add info.json
zip_file.writestr(os.path.join(filename, "info.json"), json.dumps({
"type": "public.plain-text",
"version": "2"
}))
# Add text
zip_file.writestr(os.path.join(filename, "text.txt"),
self.text_with_converted_asset_paths())
# Add assets
        for asset in self.asset_filenames():
            zip_file.write(asset,
                           os.path.join(filename,
                                        self.convert_asset_path(asset)))
class BearDb(object):
def __init__(self):
self.db = sqlite3.connect("file:%s?mode=ro" % dbpath, uri=True)
self.db.row_factory = sqlite3.Row
def all_notes(self):
ids = self.db.execute(
"SELECT Z_PK FROM ZSFNOTE WHERE ZTRASHED != 1").fetchall()
notes = [Note(self.db, i["Z_PK"]) for i in ids]
return notes
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Back up bear notes")
parser.add_argument('-v', '--verbose', action='store_true',
help='print additional messages during backup')
parser.add_argument('-d', '--debug', action='store_true',
help="don't back up - bring up a debug console instead")
parser.add_argument('-f', '--force', action='store_true',
help="Overwrite existing files even if newer")
parser.add_argument('-n', '--dry-run', action='store_true',
help="Don't create/delete any files, just print "
"what would happen")
parser.add_argument('-o', '--notify', action='store_true',
help="Show an OSX notification once backup is complete")
parser.add_argument('-r', '--remove', action='store_true',
help="Remove any deleted notes from the backup")
parser.add_argument('dirname', metavar='DIRECTORY', type=os.path.expanduser,
help='directory to back up notes to')
args = parser.parse_args()
if args.dry_run:
# Dry run implies verbose
args.verbose = True
if args.verbose:
print("Backing up to: %s" % args.dirname)
# Make sure the directory we are backing up to exists, then cd into it
os.makedirs(args.dirname, exist_ok=True)
os.chdir(args.dirname)
bear_db = BearDb()
notes = bear_db.all_notes()
if args.debug:
import code
code.interact(banner="Debug console", local=locals())
sys.exit(0)
for note in notes:
if not args.force:
if note.existing_file_is_newer():
continue
if args.dry_run:
print("Would back up: %s" % note.filename())
else:
if args.verbose:
print("Backing up: %s" % note.filename())
note.zip_note()
if args.remove:
keep_notes = {str(note.full_filename()) for note in notes}
all_notes = set(glob.glob("*.bearnote"))
delete_notes = all_notes - keep_notes
for note in delete_notes:
if args.dry_run:
print("Would delete: %s" % note)
else:
if args.verbose:
print("Deleting %s" % note)
os.remove(note)
if args.notify:
text = "Backed up notes to %s" % args.dirname
title = "Bear notes backup"
subprocess.run(["osascript","-e",
"display notification \"%s\" with title \"%s\"" % (
text, title)])
| mit | 2,395,900,684,026,304,500 | 35.646465 | 80 | 0.577729 | false |
cedrick-f/pySyLiC | src/fitting.py | 1 | 10186 | #!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
##This file is part of pySylic
#############################################################################
#############################################################################
## ##
## Fitting ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2009-2010 Cédrick FAURY
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pySylic; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import scipy.interpolate
import scipy
from scipy.optimize import curve_fit
from numpy import *
import csv
import wx
# Reference functions
def rep1(t, K, T):
return K*(1.-scipy.exp(-t/T))
def rep2(t, K, z, w):
T1 = -1/(-z*w-w*sqrt(z*z-1))
T2 = -1/(-z*w+w*sqrt(z*z-1))
return K*(1-(T1*exp(-t/T1)-T2*exp(-t/T2))/(T1-T2))
#def rep2(t, K, z, w):
# _w = w*sqrt(z*z-1)
# p1 = -z*w + _w
# p2 = -z*w - _w
# B = z*z/(p1*(p1-p2))
# C = z*z/(p2*(p2-p1))
# return K*(1 + B*exp(p1*t) + C*exp(p2*t))
def rep4(t, K, z, w):
a = z*w
b = w*sqrt(1-z*z)
return K*(1 - exp(-a*t)*cos(b*t) - a/b*exp(-a*t)*sin(b*t))
fct = [rep1, rep2, rep4]
def message(mess):
dlg = wx.MessageDialog(None, mess,
_(u'Erreur'),
wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
# Measurement points
def getPointsMesure(fichierCSV):
def conv(sr):
s = []
for l in sr:
la = asfarray(l)
s.append(la)
s = array(s)
s = transpose(s)
return s
for sep in [';', ',', ' ']:
try:
spamReader = csv.reader(open(fichierCSV, 'rb'), delimiter = sep)
except:
message(_(u"Impossible d'ouvrir le fichier :\n") + fichierCSV)
try:
s = conv(spamReader)
except:
s = []
valid = True
try:
t = s[0]
y_meas = s[1]
except:
valid = False
if valid:
return t, y_meas
message(_(u"Le fichier %s \nn'a pas un format valide.") %fichierCSV)
return None, None
# Initial parameters
p0 = [[1,1],
[1, 2, 1],
[1, 0.5, 1]]
def ajuster(x, y, mode = 0):
with errstate(all='ignore'):
var = []
par = []
for _fct, _p0 in zip(fct, p0):
try:
popt, pcov = curve_fit(_fct, x, y, _p0)
except RuntimeError:
continue
var.append(max(diag(pcov)))
par.append(popt)
if mode == 0:
i = var.index(min(var))
elif mode == 1:
i = 0
else:
i = var.index(min(var[1:]))
return [par[i], fct[i]]
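
# Quick self-check sketch (editor's addition); the numbers are illustrative:
#
#   t = linspace(0, 10, 200)
#   y = rep1(t, 2.0, 1.5)            # first-order step response, K=2, T=1.5
#   params, model = ajuster(t, y)    # expected: model is rep1, params ~ [2.0, 1.5]
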
##########################################################################################################
##########################################################################################################
#
#  Handling of time-domain responses
#
##########################################################################################################
##########################################################################################################
from CedWidgets import *
import graphmpl as graph
class WinAjustement(wx.MiniFrame, PrintHandler):
def __init__(self, parent, pos=wx.DefaultPosition, size=wx.DefaultSize,
style = globdef.STYLE_FENETRE):
        # Display-state variable: complete = all widgets shown
self.AffComplet = True
self.FT = None
self.Reponse = None
self.Mesure = None
size = (414,550)
wx.MiniFrame.__init__(self, parent, -1, _(u"Ajustement de réponse indicielle"), pos, size, style)
# self.SetMinSize(size)
self.SetAutoLayout(True)
self.initPrintHandler(PrintoutWx, parent, globdef.PRINT_PAPIER_DEFAUT, globdef.PRINT_MODE_DEFAUT)
#
        # Plotting area
#
outils = ["BGrille", "", "BZoomA", "BZoomP", "BDepla", "BEchel", "", "BCurse", 'BImpri', "", "BExpor",'BParam']
self.ZoneAjustement = graph.ZoneGraphOutils(self, parent, 3, outils, tempo = True)
self.ZoneAjustement.Add(graph.ZoneGraphAjustement(self.ZoneAjustement, self.ZoneAjustement, _(u"Ajustement réponse indicielle")))
# self.zoneReponse = graph.ZoneGraphReponse(self, parent)
#
        # Display area for the transfer function (FT)
#
self.pop = PopPanel(self)
sizer0 = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.BmpFT = ScrolledBitmap(self.pop, -1, wx.NullBitmap)
sizer.Add(self.BmpFT, 1, flag = wx.EXPAND|wx.ALL, border = 5)
self.pop.SetSizerAndFit(sizer)
panelbtn = wx.Panel(self, -1)
button = wx.Button(panelbtn, -1, _(u"Fermer"))
self.panelbtn = panelbtn
#
        # Assemble the layout
#
sizer0.Add(self.pop, 0, flag = wx.EXPAND)
sizer0.Add(self.ZoneAjustement,1, flag = wx.EXPAND)
sizer0.Add(panelbtn, flag = wx.EXPAND|wx.ALIGN_CENTER)
self.SetSizer(sizer0)
self.sizer = sizer0
self.signal = 0
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)
# self.SetInitialSize(size)
self.SetAutoLayout(False)
def OnCloseWindow(self, event):
print "OnCloseWindow"
self.Parent.win = None
self.Destroy()
return
######################################################################################################
def SetAffComplet(self):
# print "SetAffComplet"
self.AffComplet = not self.AffComplet
if self.AffComplet:
self.TailleOrigZg = self.GetClientSize()
posZg = self.ZoneAjustement.GetScreenPosition()
self.pop.Show(True)
self.sizer.Insert(0, self.pop, 0, flag = wx.EXPAND)
self.panelbtn.Show(True)
self.sizer.Insert(2, self.panelbtn, 0, flag = wx.EXPAND)
self.SetClientSize(self.TailleOrig)
PosOrig = self.GetPosition()
self.SetPosition((PosOrig[0], posZg[1]+self.dph))
else:
self.TailleOrig = self.GetClientSize()
PosOrig = self.GetPosition()
posZg = self.ZoneAjustement.GetScreenPosition()
posBmp = self.pop.GetScreenPosition()
dph = PosOrig[1] - posBmp[1]
self.dph = PosOrig[1] - posZg[1]
self.pop.Show(False)
self.sizer.Detach(self.pop)
self.panelbtn.Show(False)
self.sizer.Detach(self.panelbtn)
if hasattr(self, 'TailleOrigZg'):
self.SetClientSize(self.TailleOrigZg)
else:
self.SetClientSize(self.ZoneAjustement.GetSize())
self.SetPosition((PosOrig[0], posZg[1]+dph))
self.Layout()
def OnCloseMe(self, event):
self.Close(True)
######################################################################################################
def MiseAJour(self, FT, mesures, reponse):
self.FT = FT[0]
self.Mesure = mesures
self.Reponse = reponse
self.MiseAJourBmp()
self.ZoneAjustement.mettreAJourEtRedessiner(self.Mesure, self.Reponse)
######################################################################################################
def ReTracer(self):
if hasattr(self.ZoneAjustement.child[0], 'consigne'):
self.ZoneAjustement.child[0].TracerTout()
else:
self.MiseAJour(0)
######################################################################################################
def OnSize(self, event):
self.sizer.SetDimension(0,0,self.GetClientSize()[0],self.GetClientSize()[1])
######################################################################################################
def setFT(self, FT, mesures, reponse):
self.FT = FT[0]
self.Mesure = mesures
self.Reponse = reponse
self.MiseAJourBmp()
######################################################################################################
def MiseAJourBmp(self):
if self.FT == None:
return
# print "MiseAJourBmp"
self.BmpFT.SetBitmap(self.FT.getBitmap(), self.GetBmpHD)
# self.MiseAJour()
self.pop.Fit()
self.pop.FitInside()
self.pop.Refresh()
# self.sizer.Fit()
# self.sizer.Fit(self)
self.sizer.SetItemMinSize(self.pop, self.pop.GetSize()[0], self.pop.GetSize()[1])
self.sizer.Layout()
def GetBmpHD(self):
return self.FT.getBitmap(taille = globdef.FONT_SIZE_FT_HD)
def modifierAntialiased(self):
self.ZoneAjustement.child[0].modifierAntialiased()
| gpl-3.0 | 8,334,137,796,830,296,000 | 31.752412 | 137 | 0.461614 | false |
octacoin-project/beta | contrib/seeds/makeseeds.py | 1 | 3747 | #!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
])
import re
import sys
import dns.resolver
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):4245$")
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0.8.6\/|\/Satoshi:0.9.(2|3)\/|\/Satoshi:0.10.\d{1,2}\/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
# Match only IPv4
m = PATTERN_IPV4.match(sline[0])
if m is None:
return None
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'ip': m.group(1),
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
}
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
result = []
asn_count = {}
for ip in ips:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid IPv4 address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['ipnum']))
for ip in ips:
print ip['ip']
if __name__ == '__main__':
main()
| mit | -5,786,766,114,597,601,000 | 30.754237 | 186 | 0.57139 | false |
dakcarto/suite-qgis-plugin | src/opengeo/test/integrationtest.py | 1 | 3347 | import unittest
from PyQt4.QtCore import QSettings
from opengeo.gui.explorer import OpenGeoExplorer
from opengeo.test import utils
from opengeo.gui.gsexploreritems import GsCatalogItem
from opengeo.gui.pgexploreritems import PgConnectionItem
class ExplorerIntegrationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.explorer = OpenGeoExplorer(singletab = True)
cls.cat = utils.getGeoServerCatalog().catalog
utils.populateCatalog(cls.cat)
cls.catalogItem = GsCatalogItem(cls.cat, "catalog", "")
cls.explorer.explorerWidget.gsItem.addChild(cls.catalogItem)
cls.catalogItem.populate()
cls.tree = cls.explorer.explorerWidget.tree
cls.conn = utils.getPostgresConnection()
cls.pgItem = PgConnectionItem(cls.conn)
cls.explorer.explorerWidget.pgItem.addChild(cls.pgItem)
# @TODO - make tests pass using importer
cls.useRestApi = QSettings().setValue("/OpenGeo/Settings/GeoServer/UseRestApi", True)
@classmethod
def tearDownClass(cls):
utils.cleanCatalog(cls.cat)
utils.cleanDatabase(cls.conn)
def _getItemUnder(self, parent, name):
for idx in range(parent.childCount()):
item = parent.child(idx)
if item.text(0) == name:
return item
def getStoreItem(self, ws, name):
return self._getItemUnder(self.getWorkspaceItem(ws), name)
def getWorkspaceItem(self, name):
return self._getItemUnder(self.getWorkspacesItem(), name)
def getLayerItem(self, name):
return self._getItemUnder(self.getLayersItem(), name)
def getGroupItem(self, name):
return self._getItemUnder(self.getGroupsItem(), name)
def getStyleItem(self, name):
return self._getItemUnder(self.getStylesItem(), name)
def getWorkspacesItem(self):
return self.catalogItem.child(0)
def getLayersItem(self):
return self.catalogItem.child(1)
def getGroupsItem(self):
return self.catalogItem.child(2)
def getStylesItem(self):
return self.catalogItem.child(3)
def getPGConnectionsItem(self):
return self.explorer.explorerWidget.pgItem
def getPGConnectionItem(self):
return self.pgItem
def getPGSchemaItem(self, name):
return self._getItemUnder(self.getPGConnectionItem(), name)
def getPGTableItem(self, table, schema = "public"):
return self._getItemUnder(self.getPGSchemaItem(schema), table)
def getQgsLayersItem(self):
return self.explorer.explorerWidget.qgsItem.child(0)
def getQgsLayerItem(self, name):
return self._getItemUnder(self.getQgsLayersItem(), name)
def getQgsGroupsItem(self):
return self.explorer.explorerWidget.qgsItem.child(1)
def getQgsGroupItem(self, name):
return self._getItemUnder(self.getQgsGroupsItem(), name)
def getQgsStylesItem(self):
return self.explorer.explorerWidget.qgsItem.child(2)
def getQgsStyleItem(self, name):
return self._getItemUnder(self.getQgsStylesItem(), "Style of layer '%s'" % name)
def getGWCLayersItem(self):
return self.catalogItem.child(4)
def getGWCLayerItem(self, name):
return self._getItemUnder(self.getGWCLayersItem(), name)
| gpl-2.0 | -6,925,303,547,348,892,000 | 32.148515 | 93 | 0.684195 | false |
googleapis/googleapis-gen | google/cloud/resourcemanager/v3/resourcemanager-v3-py/google/cloud/resourcemanager_v3/services/tag_values/pagers.py | 1 | 5794 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.resourcemanager_v3.types import tag_values
class ListTagValuesPager:
"""A pager for iterating through ``list_tag_values`` requests.
This class thinly wraps an initial
:class:`google.cloud.resourcemanager_v3.types.ListTagValuesResponse` object, and
provides an ``__iter__`` method to iterate through its
``tag_values`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListTagValues`` requests and continue to iterate
through the ``tag_values`` field on the
corresponding responses.
All the usual :class:`google.cloud.resourcemanager_v3.types.ListTagValuesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., tag_values.ListTagValuesResponse],
request: tag_values.ListTagValuesRequest,
response: tag_values.ListTagValuesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.resourcemanager_v3.types.ListTagValuesRequest):
The initial request object.
response (google.cloud.resourcemanager_v3.types.ListTagValuesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = tag_values.ListTagValuesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[tag_values.ListTagValuesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[tag_values.TagValue]:
for page in self.pages:
yield from page.tag_values
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListTagValuesAsyncPager:
"""A pager for iterating through ``list_tag_values`` requests.
This class thinly wraps an initial
:class:`google.cloud.resourcemanager_v3.types.ListTagValuesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``tag_values`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListTagValues`` requests and continue to iterate
through the ``tag_values`` field on the
corresponding responses.
All the usual :class:`google.cloud.resourcemanager_v3.types.ListTagValuesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[tag_values.ListTagValuesResponse]],
request: tag_values.ListTagValuesRequest,
response: tag_values.ListTagValuesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.resourcemanager_v3.types.ListTagValuesRequest):
The initial request object.
response (google.cloud.resourcemanager_v3.types.ListTagValuesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = tag_values.ListTagValuesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[tag_values.ListTagValuesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[tag_values.TagValue]:
async def async_generator():
async for page in self.pages:
for response in page.tag_values:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| apache-2.0 | -632,609,721,965,289,900 | 40.385714 | 95 | 0.656196 | false |
therealkbhat/mazerunner | mazerunner/mazerunner.py | 1 | 8720 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Usage:
mazerunner.py -m <filename>
"""
import signal
import random
import curses
import time
import sys
import pdb
import os
import threading
import atexit
import locale
from docopt import docopt
locale.setlocale(locale.LC_ALL,"") # necessary to get curses to work with unicode
grid = []
player_pos = {}
trolls = []
exit_pos = {}
screen = curses.initscr()
curses.start_color()
curses.use_default_colors()
curses.noecho()
curses.cbreak()
curses.init_pair(4, curses.COLOR_GREEN, curses.COLOR_BLACK) # trolls
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE) # walls
curses.init_pair(2, curses.COLOR_YELLOW, curses.COLOR_BLACK) # player
curses.init_pair(3, curses.COLOR_MAGENTA, curses.COLOR_WHITE) # exit
curses.init_pair(5, curses.COLOR_CYAN, curses.COLOR_BLACK) # empty space
# characters to use when drawing
Troll = u'☃' # u'T'
Wall = u'░' # u'#'
Exit = u'⚙' # u'X'
Empty = u'∴' # u' '
Player = (u'◀', u'▲', u'▶', u'▼') #(u'<', u'^', u'>', u'v')
# indices into Player for different orientations
LEFT = 0
UP = 1
RIGHT = 2
DOWN = 3
screen.keypad(1)
def doexit():
from subprocess import call
call(["stty", "sane"])
atexit.register(doexit)
def sig_handler(signal, frame):
curses.nocbreak()
screen.keypad(0)
curses.echo()
sys.exit(0)
def getEmptySpace(width, height):
"""Returns a random empty spot in the maze."""
while True:
x = random.randint(0, width - 1)
y = random.randint(0, height - 1)
if grid[y][x] == Empty:
return x, y
def init(fname):
"""Read maze from file and place player and troll in random spots."""
fname = "mazerunner/mazes/"+fname
if not os.path.exists(fname):
sys.exit("Maze file does not exist")
# perhaps use a generated maze here
with open(fname, "r") as f:
for line in f:
# replace markers in input for walls/etc with characters used for rendering
row = list(line.strip().decode("utf-8").replace(u'#', Wall).replace(' ', Empty).replace('X', Exit))
grid.append(row)
width = len(grid[0])
height = len(grid)
for idx, row in enumerate(grid):
if Exit in row:
exit_pos['x'] = row.index(Exit)
exit_pos['y'] = idx
player_pos['x'], player_pos['y'] = getEmptySpace(width, height)
grid[player_pos['y']][player_pos['x']] = Player[UP]
for t in range(10):
x, y = getEmptySpace(width, height)
grid[y][x] = Troll
trolls.append({'x': x, 'y': y})
def isBorderBlock(x, y):
"""Checks if given x, y corresponds to a border block."""
width = len(grid[0])
height = len(grid)
if x == 0 or x == width - 1:
return True
if y == 0 or y == height - 1:
return True
return False
def render():
"""Clear screen and redraw it."""
screen.clear()
temp = grid
for row in temp:
for idx, ch in enumerate(row):
if ch == Wall:
screen.addstr(ch.encode('utf8'), curses.color_pair(1))
elif ch == Troll:
screen.addstr(ch.encode('utf8'), curses.color_pair(4))
elif ch in Player:
screen.addstr(ch.encode('utf8'), curses.color_pair(2))
elif ch == Exit:
screen.addstr(ch.encode('utf8'), curses.color_pair(3))
else:
screen.addstr(ch.encode('utf8'), curses.color_pair(5) | curses.A_DIM)
if idx == (len(row) - 1):
screen.addstr('\n')
screen.refresh()
def moveTrolls():
"""Move trolls towards player."""
while True:
render()
time.sleep(1)
for troll in trolls:
grid[troll['y']][troll['x']] = Empty
trollDir = ''
possibilities = []
moved = False
if troll['x'] == player_pos['x'] and troll['y'] == player_pos['y']:
print('YOU WERE EATEN')
sys.exit(0)
if (troll['x'] - player_pos['x']) > 0:
trollDir += 'l'
elif (troll['x'] - player_pos['x']) < 0:
trollDir += 'r'
if (troll['y'] - player_pos['y']) > 0:
trollDir += 'u'
elif (troll['y'] - player_pos['y']) < 0:
trollDir += 'd'
for ch in trollDir:
if ch == 'u':
possibilities.append((troll['x'], troll['y'] - 1))
elif ch == 'd':
possibilities.append((troll['x'], troll['y'] + 1))
elif ch == 'l':
possibilities.append((troll['x'] - 1, troll['y']))
elif ch == 'r':
possibilities.append((troll['x'] + 1, troll['y']))
for p in possibilities:
if grid[p[1]][p[0]] in (Empty,) + Player:
troll['x'] = p[0]
troll['y'] = p[1]
grid[p[1]][p[0]] = Troll
moved = True
break
if not moved:
while True:
x = troll['x'] + [-1, 0, 1][random.randint(0, 2)]
y = troll['y'] + [-1, 0, 1][random.randint(0, 2)]
if grid[y][x] == Empty:
grid[troll['y']][troll['x']] = Empty
troll['x'] = x
troll['y'] = y
grid[y][x] = Troll
break
def pushBlock(x, y):
"""If given x, y is empty, place a block there."""
if grid[y][x] == Empty:
grid[y][x] = Wall
return True
elif grid[y][x] == Troll:
for idx, troll in enumerate(trolls):
if troll['x'] == x and troll['y'] == y:
grid[y][x] = Wall
del trolls[idx]
return True
return False
def updatePlayerPosition(direction):
"""Updates the grid depending on direction entered by user."""
oldX = player_pos['x']
oldY = player_pos['y']
if grid[oldY][oldX] == Troll:
print('YOU WERE EATEN')
sys.exit(0)
# turn player if they're changing direction
if grid[oldY][oldX] != Player[direction]:
grid[oldY][oldX] = Player[direction]
return
if direction == UP:
if grid[oldY - 1][oldX] == Wall:
if not isBorderBlock(oldX, oldY - 1):
if not pushBlock(oldX, oldY - 2):
return
else:
return
player_pos['y'] -= 1
elif direction == DOWN:
if grid[oldY + 1][oldX] == Wall:
if not isBorderBlock(oldX, oldY + 1):
if not pushBlock(oldX, oldY + 2):
return
else:
return
player_pos['y'] += 1
elif direction == LEFT:
if grid[oldY][oldX - 1] == Wall:
if not isBorderBlock(oldX - 1, oldY):
if not pushBlock(oldX - 2, oldY):
return
else:
return
player_pos['x'] -= 1
else: # RIGHT
if grid[oldY][oldX + 1] == Wall:
if not isBorderBlock(oldX + 1, oldY):
if not pushBlock(oldX + 2, oldY):
return
else:
return
player_pos['x'] += 1
grid[player_pos['y']][player_pos['x']] = grid[oldY][oldX]
grid[oldY][oldX] = Empty
for troll in trolls:
if player_pos['y'] == troll['y'] and player_pos['x'] == troll['x']:
grid[player_pos['y']][player_pos['x']] = Troll
render()
print('YOU WERE EATEN')
sys.exit(0)
if player_pos['y'] == exit_pos['y'] and player_pos['x'] == exit_pos['x']:
print('VICTORY')
sys.exit(0)
def gameLoop():
"""Main game loop; receives keypresses from user and handles them."""
while True:
ch = screen.getch()
if ch == curses.KEY_UP:
updatePlayerPosition(UP)
elif ch == curses.KEY_DOWN:
updatePlayerPosition(DOWN)
elif ch == curses.KEY_LEFT:
updatePlayerPosition(LEFT)
elif ch == curses.KEY_RIGHT:
updatePlayerPosition(RIGHT)
elif ch == ord('q'):
curses.nocbreak()
screen.keypad(0)
curses.echo()
sys.exit(0)
render()
if __name__ == "__main__":
arguments = docopt(__doc__)
signal.signal(signal.SIGINT, sig_handler)
print(arguments['<filename>'])
init(arguments['<filename>'])
troll_thread = threading.Thread(target=moveTrolls)
troll_thread.daemon = True
troll_thread.start()
render()
gameLoop()
| mit | -4,523,575,646,898,852,000 | 28.306397 | 111 | 0.507927 | false |
harterj/moose | python/moosesqa/test/test_SQARequirementReport.py | 9 | 1711 | #!/usr/bin/env python
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
import mock
import logging
import pyhit
import mooseutils
from moosesqa import SQAReport, SQARequirementReport
@unittest.skipIf(mooseutils.git_version() < (2,11,4), "Git version must at least 2.11.4")
class TestSQARequirementReport(unittest.TestCase):
@mock.patch('mooseutils.colorText', side_effect=lambda t, c, **kwargs: t)
def testBasic(self, color_text):
reporter = SQARequirementReport(title='moosesqa', directories=['python/moosesqa/test'])
r = reporter.getReport()
self.assertEqual(reporter.status, SQAReport.Status.PASS)
self.assertIn('moosesqa OK', r)
@mock.patch('mooseutils.colorText', side_effect=lambda t, c, **kwargs: '{}:{}'.format(c,t))
def testOptions(self, *args):
reporter = SQARequirementReport(title='testing', specs='spec_missing_req',
directories=['python/moosesqa/test/specs'])
r = reporter.getReport()
self.assertEqual(reporter.status, SQAReport.Status.ERROR)
self.assertIn('log_missing_requirement: 1', r)
self.assertIn('log_missing_design: 1', r)
self.assertIn('log_missing_issues: 1', r)
self.assertIn('log_empty_requirement: 1', r)
self.assertIn('log_empty_design: 1', r)
self.assertIn('log_empty_issues: 1',r )
if __name__ == '__main__':
unittest.main(verbosity=2)
| lgpl-2.1 | 1,884,194,731,954,327,300 | 38.790698 | 95 | 0.676797 | false |
cpausmit/FiBS | bin/fibsLastLogs.py | 1 | 1634 | #!/usr/bin/env python
#---------------------------------------------------------------------------------------------------
# Script to find the youngest logfiles of completed jobs for a given FiBS task.
#---------------------------------------------------------------------------------------------------
import os,sys
import rex
def getJobsFromOutput(out):
# decode the full find output and make a list of non zero err or out tasks
jobSizes = {}
for line in out.split('\n'):
line = line[:-1]
f = line.split(' ')
if len(f) >= 3:
            size = int(f[1])  # compare sizes numerically, not lexicographically
job = f[2]
job = ".".join(job.split(".")[:-1])
job = job.replace("./","")
if job in jobSizes:
if jobSizes[job] < size:
jobSizes[job] = size
else:
jobSizes[job] = size
#print ' job: %s'%(job)
return jobSizes
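
# Example (editor's addition): for find output such as
#   "1612345678.0 2048 ./task_1.out\n1612345678.0 0 ./task_1.err\n"
# the function keeps the largest size seen per job stem:
#   getJobsFromOutput(out)   # -> {'task_1': 2048}
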
# get our parameters as needed
base = os.environ.get('FIBS_BASE','')
logs = os.environ.get('FIBS_LOGS','')
minutes = "30"
if base == '':
    print ' ERROR - FIBS_BASE is not defined. EXIT '
    sys.exit(1)
if len(sys.argv) < 2:
    print ' ERROR - please specify task '
    sys.exit(1)
task = sys.argv[1]
if len(sys.argv) > 2:
minutes = sys.argv[2]
os.chdir(logs+'/'+task);
cmd = "find ./ -cmin -" + minutes + " -printf \"%T@ %s %p\n\" | egrep \(.out\|.err\) | sort -n | tail -100"
rex = rex.Rex('none','none')
(rc,out,err) = rex.executeLocalAction(cmd)
# now print them out
jobSizes = getJobsFromOutput(out)
for job in jobSizes:
size = jobSizes[job]
if size > 1:
print " %s/%s/%s.{err,out}"%(logs,task,job)
| mit | -2,247,456,938,416,008,200 | 28.178571 | 107 | 0.479804 | false |
erigones/esdc-ce | gui/widgets.py | 1 | 7361 | import json
import phonenumbers
from django import forms
from django.forms import widgets
from django.utils import six
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from frozendict import frozendict
from taggit.forms import TagWidget as _TagWidget
from api.utils.encoders import JSONEncoder
from gui.models import UserProfile
__all__ = (
'NumberInput',
'EmailInput',
'URLInput',
'TelInput',
'TelPrefixInput',
'ByteSizeInput',
'ArrayWidget',
'ArrayAreaWidget',
'DictWidget',
'TagWidget',
)
HTML5_ATTRS = frozendict({'autocorrect': 'off', 'autocapitalize': 'off', 'spellcheck': 'false'})
def edit_string_for_items(array, escape_space=True, escape_comma=True, sort=False):
"""Like taggit.utils.edit_string_for_tags, but with list/tuple as input and without sorting"""
items = []
for i in array:
if not isinstance(i, six.string_types):
i = str(i)
if escape_space and ' ' in i:
items.append('"%s"' % i)
        elif escape_comma and ',' in i:
items.append('"%s"' % i)
else:
items.append(i)
if sort:
items.sort()
return ','.join(items)
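
# Example (editor's addition), mirroring taggit's edit-string behaviour:
#   edit_string_for_items(['web server', 'db', 'a,b'])  # -> '"web server",db,"a,b"'
#   edit_string_for_items([3, 1, 2], sort=True)         # -> '1,2,3'
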
# noinspection PyAbstractClass
class _DefaultAttrsWidget(widgets.Widget):
default_attrs = None
default_class = None
def __init__(self, attrs=None):
if self.default_attrs:
# dict() converts default_attrs from frozendict to regular dict
defaults = dict(self.default_attrs)
if attrs:
defaults.update(attrs)
else:
defaults = attrs
super(_DefaultAttrsWidget, self).__init__(attrs=defaults)
if self.default_class:
self.attrs['class'] = (self.default_class + ' ' + self.attrs.get('class', '')).rstrip()
class ArrayWidget(_DefaultAttrsWidget, widgets.TextInput):
tag_choices = None
def __init__(self, *args, **kwargs):
self.tags = kwargs.pop('tags', False)
self.escape_space = kwargs.pop('escape_space', True)
self.escape_comma = kwargs.pop('escape_comma', True)
super(ArrayWidget, self).__init__(*args, **kwargs)
def build_attrs(self, *args, **kwargs):
if self.tag_choices:
tags = json.dumps(self.tag_choices, indent=None, cls=JSONEncoder)
kwargs['data-tags-choices'] = mark_safe(conditional_escape(tags))
return super(ArrayWidget, self).build_attrs(*args, **kwargs)
def render(self, name, value, attrs=None):
if value is not None and not isinstance(value, six.string_types):
value = edit_string_for_items(value, escape_space=self.escape_space, escape_comma=self.escape_comma,
sort=self.tags)
return super(ArrayWidget, self).render(name, value, attrs=attrs)
class ArrayAreaWidget(_DefaultAttrsWidget, widgets.Textarea):
default_attrs = frozendict({'rows': 3, 'cols': 40})
default_class = 'input-array'
def render(self, name, value, attrs=None):
if value is not None and not isinstance(value, six.string_types):
value = json.dumps(value, indent=4, ensure_ascii=False)
return super(ArrayAreaWidget, self).render(name, value, attrs=attrs)
class DictWidget(_DefaultAttrsWidget, widgets.Textarea):
default_attrs = frozendict({'rows': 4, 'cols': 40})
default_class = 'input-mdata'
def render(self, name, value, attrs=None):
if value is not None and not isinstance(value, six.string_types):
value = json.dumps(value, indent=4, ensure_ascii=False)
return super(DictWidget, self).render(name, value, attrs=attrs)
class NumberInput(_DefaultAttrsWidget, widgets.Input):
"""
HTML5 input type for numbers.
"""
input_type = 'number'
default_attrs = HTML5_ATTRS
class EmailInput(_DefaultAttrsWidget, widgets.Input):
"""
HTML5 input type for email address.
"""
input_type = 'email'
default_attrs = HTML5_ATTRS
class URLInput(_DefaultAttrsWidget, widgets.URLInput):
"""
HTML5 input type for URL address.
"""
input_type = 'url'
default_attrs = HTML5_ATTRS
class TelInput(_DefaultAttrsWidget, widgets.Input):
"""
    HTML5 input type for telephone numbers.
"""
input_type = 'tel'
default_attrs = HTML5_ATTRS
class ByteSizeInput(_DefaultAttrsWidget, widgets.TextInput):
"""
    Text input for byte-size values, validated by an HTML5 pattern attribute.
"""
default_attrs = frozendict({'pattern': '[0-9.]+[BKMGTPEbkmgtpe]?'})
# noinspection PyAbstractClass
class TelPrefixSelect(widgets.Select):
"""
A drop-down menu with international phone prefixes.
"""
# noinspection PyUnusedLocal
def __init__(self, attrs=None, choices=()):
super(TelPrefixSelect, self).__init__(attrs=attrs, choices=UserProfile.PHONE_PREFIXES)
def build_attrs(self, extra_attrs=None, **kwargs):
attrs = super(TelPrefixSelect, self).build_attrs(extra_attrs=extra_attrs, **kwargs)
attrs['class'] = 'input-select2'
attrs.pop('maxlength', None)
return attrs
# noinspection PyAbstractClass
class TelPrefixInput(widgets.MultiWidget):
"""
A Widget that splits phone number input into:
- a country select box for phone prefix
- an input for local phone number
"""
erase_on_empty_input = False
# noinspection PyUnusedLocal
def __init__(self, attrs=None, initial=None):
if attrs:
self.erase_on_empty_input = attrs.pop('erase_on_empty_input', False)
multi_widgets = [TelPrefixSelect(attrs=attrs), TelInput(attrs=attrs)]
super(TelPrefixInput, self).__init__(multi_widgets, attrs=attrs)
def decompress(self, value):
if value:
# noinspection PyBroadException
try:
num = phonenumbers.parse(value)
except Exception:
return value.split(' ', 1)
else:
return ['+' + str(num.country_code), str(num.national_number)]
return [None, None]
def value_from_datadict(self, data, files, name):
values = super(TelPrefixInput, self).value_from_datadict(data, files, name)
if self.erase_on_empty_input and not values[1]:
return ''
else:
return '%s %s' % tuple(values)
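# Illustrative note (assumption, not taken from the original code): for a stored value
# such as '+421 903123456', decompress() splits it into ['+421', '903123456'] for the
# two sub-widgets, and value_from_datadict() joins the submitted parts back into a
# single 'prefix number' string (or '' when erase_on_empty_input is set and the local
# part is empty).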
def clean_international_phonenumber(value):
"""
Validate phone number taken from TelPrefixInput and return in format suitable for our DB.
"""
invalid_number_message = _(u'The phone number entered is not valid.')
try:
num = phonenumbers.parse(value)
if not phonenumbers.is_valid_number(num):
raise forms.ValidationError(invalid_number_message)
except phonenumbers.NumberParseException:
raise forms.ValidationError(invalid_number_message)
return phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.E164)
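# Illustrative example (assumed behaviour of the phonenumbers library): a structurally
# valid input such as '+1 2025550123' is expected to come back in E.164 form as
# '+12025550123', while an unparsable or invalid number raises forms.ValidationError.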
# noinspection PyAbstractClass
class TagWidget(_TagWidget):
tag_choices = None
def build_attrs(self, *args, **kwargs):
if self.tag_choices:
tags = json.dumps(self.tag_choices, indent=None, cls=JSONEncoder)
kwargs['data-tags-choices'] = mark_safe(conditional_escape(tags))
return super(TagWidget, self).build_attrs(*args, **kwargs)
| apache-2.0 | 8,442,338,872,305,309,000 | 30.728448 | 112 | 0.645836 | false |
danielelinaro/BAL | python/examples/solver.py | 1 | 1024 | #!/usr/bin/env python
from pybal import bal
from pylab import figure, plot, xlabel, ylabel, title, show, axis
# create a dynamical system and load the equations of the Hindmarsh-Rose neuron model
hr = bal.DynamicalSystem()
hr.create('HindmarshRose')
# create the parameters of the model
par = bal.Parameters(hr.npar)
# set the fixed parameters
par.setpars([2.96,0.01,4],(0,2,3))
# set the bifurcation parameter
par.bifpar(1,[2.5,4.5,11])
# create an ODE solver
solver = bal.ODESolver(hr,par)
solver.x0 = [0,0,0]
solver.dt = 0.01
solver.ttran = 1000.0
solver.tstop = 2000.0
solver.mode = 'trajectory + events'
# iterate over all possible tuple of parameters
for p in par:
# integrate
solver.run()
# get the solution
s = solver.solution()
# plot the results of the integration
figure()
plot(s.data['x'][5::3],s.data['x'][3::3],'k')
xlabel('t (a.u.)')
ylabel('x (a.u.)')
title('I = '+str(p[1])+' '+str(s.parameters[1])+' # turns = '+str(solver.nturns))
axis('tight')
show()
| mit | -7,904,466,974,603,930,000 | 25.947368 | 85 | 0.661133 | false |
wangyang59/tf_models | video_prediction/prediction_train_flo_edge.py | 1 | 17341 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for training the prediction model."""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from prediction_input_flo_edge import build_tfrecord_input, DATA_DIR
from prediction_input_flo_eval import build_tfrecord_input_eval
from prediction_model_flo_edge import construct_model
from visualize import plot_flo, plot_eval, plot_flo_edge
from optical_flow_warp import transformer
import os
# How often to record tensorboard summaries.
SUMMARY_INTERVAL = 40
# How often to run a batch through the validation model.
VAL_INTERVAL = 200
# How often to save a model checkpoint
SAVE_INTERVAL = 500
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')
flags.DEFINE_string('output_dir', "", 'directory for model checkpoints.')
flags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')
flags.DEFINE_string('pretrained_model', '',
'filepath of a pretrained model to initialize from.')
flags.DEFINE_float('train_val_split', 1.0,
'The percentage of files to use for the training set,'
' vs. the validation set.')
flags.DEFINE_integer('batch_size', 32, 'batch size for training')
flags.DEFINE_float('learning_rate', 0.001,
'the base learning rate of the generator')
flags.DEFINE_integer('num_gpus', 1,
'the number of gpu to use')
def get_black_list(clses):
blacklist = []
for cls in clses:
fname = "/home/wangyang59/Data/ILSVRC2016/ImageSets/VID/train_%s.txt" % cls
with open(fname) as f:
content = f.readlines()
blacklist += [x.split(" ")[0].split("/")[-1] + ".tfrecord" for x in content]
return blacklist
## Helper functions
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))
def weighted_mean_squared_error(true, pred, weight):
"""L2 distance between tensors true and pred.
  Args:
    true: the ground truth image.
    pred: the predicted image.
    weight: per-pixel weights used to normalize the squared error.
  Returns:
    weighted mean squared error between ground truth and predicted image.
"""
tmp = tf.reduce_sum(weight*tf.square(true-pred), axis=[1,2], keep_dims=True) / tf.reduce_sum(weight, axis=[1, 2], keep_dims=True)
return tf.reduce_mean(tmp)
#return tf.reduce_sum(tf.square(true - pred)*weight) / tf.to_float(tf.size(pred))
#return tf.reduce_sum(tf.square(true - pred)*weight) / tf.reduce_sum(weight)
def mean_L1_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred))
def weighted_mean_L1_error(true, pred, weight):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.abs(true - pred)*weight) / tf.to_float(tf.size(pred))
def huber_error(true, pred, delta=0.05):
err = true - pred
herr = tf.where(tf.abs(err)<delta, 0.5*tf.square(err), delta*(tf.abs(err) - 0.5*delta)) # condition, true, false
return tf.reduce_sum(herr) / tf.to_float(tf.size(pred))
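# Note added for clarity (not in the original file): huber_error implements the standard
# Huber loss, 0.5*err^2 when |err| < delta and delta*(|err| - 0.5*delta) otherwise.
# For example, with delta=0.05 an error of 0.01 contributes 5e-5 to the sum, while an
# error of 0.1 contributes 0.00375.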
def cal_grad_error_wmask(image, mask, kernels):
"""Calculate the gradient of the given image by calculate the difference between nearby pixels
"""
error = 0.0
img_height, img_width, color_channels = map(int, image.get_shape()[1:4])
cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]
cropped_mask = mask[:, 1:(img_height-1), 1:(img_width-1), :]
for kernel in kernels:
shifted_image = tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME')
error += weighted_mean_squared_error(cropped_image, shifted_image[:, 1:(img_height-1), 1:(img_width-1), :], cropped_mask)
return error / len(kernels)
def cal_grad_error(image, kernels):
"""Calculate the gradient of the given image by calculate the difference between nearby pixels
"""
error = 0.0
img_height, img_width, color_channels = map(int, image.get_shape()[1:4])
cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]
for kernel in kernels:
shifted_image = tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME')
error += mean_L1_error(cropped_image, shifted_image[:, 1:(img_height-1), 1:(img_width-1), :])
return error / len(kernels)
def cal_weighted_var(image, mask):
weighted_mean = tf.reduce_sum(image*mask, axis=[1, 2], keep_dims=True) / tf.reduce_sum(mask, axis=[1, 2], keep_dims=True)
#mean = tf.reduce_mean(image, axis=[1, 2], keep_dims=True)
weighted_var = (tf.reduce_sum(mask*tf.square(image - weighted_mean), axis=[1,2], keep_dims=True) + 0.0) / tf.reduce_sum(mask, axis=[1, 2], keep_dims=True)
return tf.reduce_mean(weighted_var)
def cal_weighted_edge_diff(image, mask, kernels, scale=1.0):
error = 0.0
img_height, img_width, color_channels = map(int, image.get_shape()[1:4])
cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]
cropped_mask = mask[:, 1:(img_height-1), 1:(img_width-1), :]
for kernel in kernels:
shifted_image = (tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME'))[:, 1:(img_height-1), 1:(img_width-1), :]
shifted_mask = (tf.nn.depthwise_conv2d(mask, tf.tile(kernel, [1, 1, 1, 1]),
[1, 1, 1, 1], 'SAME'))[:, 1:(img_height-1), 1:(img_width-1), :]
tmp = tf.exp(-tf.reduce_sum(tf.square(shifted_image-cropped_image), axis=[3], keep_dims=True) / scale) * tf.square(shifted_mask-cropped_mask)
error += tf.reduce_sum(tmp) / tf.to_float(tf.size(shifted_mask))
return error / len(kernels)
def create_flo_edge(flo, kernels):
batch_size, img_height, img_width, color_channels = map(int, flo.get_shape()[0:4])
flo_shift_v = tf.nn.depthwise_conv2d(flo, tf.tile(kernels[0], [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME')
flo_shift_h = tf.nn.depthwise_conv2d(flo, tf.tile(kernels[1], [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME')
pos = tf.constant(1.0, shape=[batch_size, img_height, img_width, 1])
neg = tf.constant(0.0, shape=[batch_size, img_height, img_width, 1])
true_edge = tf.where(tf.reduce_max(tf.concat([tf.abs(flo-flo_shift_h), tf.abs(flo-flo_shift_v)], axis=[3]),
axis=[3], keep_dims=True)>0.5,
x=pos, y=neg)
# true_edge_h = tf.where(tf.reduce_max(tf.abs(flo-flo_shift_h), axis=[3], keep_dims=True)>0.3, x=pos, y=neg)
# true_edge_v = tf.where(tf.reduce_max(tf.abs(flo-flo_shift_v), axis=[3], keep_dims=True)>0.3, x=pos, y=neg)
return true_edge
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
class Model(object):
def __init__(self,
image1=None,
flo=None,
true_edge=None,
reuse_scope=False,
scope=None,
prefix="train"):
#self.prefix = prefix = tf.placeholder(tf.string, [])
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
self.kernels = []
for i in xrange(4):
kernel = np.zeros((3 * 3), dtype=np.float32)
kernel[i*2 + 1] = 1.0
kernel = kernel.reshape((3, 3, 1, 1))
kernel = tf.constant(kernel, shape=(3, 3, 1, 1),
name='kernel_shift'+str(i), verify_shape=True)
self.kernels.append(kernel)
if not reuse_scope:
edge_mask = construct_model(image1)
else: # If it's a validation or test model.
with tf.variable_scope(scope, reuse=True):
edge_mask = construct_model(image1)
#flo_mag = tf.sqrt(tf.square(flo[:,:,:,0:1]) + tf.square(flo[:,:,:,1:2]))
# true_edge_h, true_edge_v, true_edge = create_flo_edge(flo, self.kernels)
loss = -tf.reduce_mean(true_edge*tf.log(edge_mask+1e-10) + (1.0-true_edge)*tf.log(1.0-edge_mask+1e-10))
#loss_v = -tf.reduce_mean(true_edge_v*tf.log(edge_mask_v+1e-10) + (1.0-true_edge_v)*tf.log(1.0-edge_mask_v+1e-10))
self.loss = loss
self.orig_image1 = image1
self.flo = flo
self.true_edge = true_edge
self.pred_edge = edge_mask
summaries.append(tf.summary.scalar(prefix + '_loss', self.loss))
self.summ_op = tf.summary.merge(summaries)
class Model_eval(object):
def __init__(self,
image=None,
mask=None,
scope=None):
#self.prefix = prefix = tf.placeholder(tf.string, [])
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
self.kernels = []
for i in xrange(4):
kernel = np.zeros((3 * 3), dtype=np.float32)
kernel[i*2 + 1] = 1.0
kernel = kernel.reshape((3, 3, 1, 1))
kernel = tf.constant(kernel, shape=(3, 3, 1, 1),
name='kernel_shift'+str(i), verify_shape=True)
self.kernels.append(kernel)
batch_size, img_height, img_width = map(int, image.get_shape()[0:3])
with tf.variable_scope(scope, reuse=True):
edge_mask_h = construct_model(image)
#self.summ_op = tf.summary.merge(summaries)
self.image = image
self.mask_true = mask
self.pred_edge_h = edge_mask_h
def main(unused_argv):
if FLAGS.output_dir == "":
raise Exception("OUT_DIR must be specified")
if os.path.exists(FLAGS.output_dir):
raise Exception("OUT_DIR already exist")
print 'Constructing models and inputs.'
with tf.Graph().as_default(), tf.device('/cpu:0'):
train_op = tf.train.AdamOptimizer(FLAGS.learning_rate)
tower_grads = []
itr_placeholders = []
image, flo, true_edge= build_tfrecord_input(training=True)
split_image = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=image)
split_flo = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=flo)
split_true_edge = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=true_edge)
image_eval, mask_eval = build_tfrecord_input_eval()
summaries_cpu = tf.get_collection(tf.GraphKeys.SUMMARIES, tf.get_variable_scope().name)
with tf.variable_scope(tf.get_variable_scope()) as vs:
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
if i == FLAGS.num_gpus - 1:
scopename = "model"
else:
scopename = '%s_%d' % ("tower", i)
with tf.name_scope(scopename) as ns:
if i == 0:
model = Model(split_image[i], split_flo[i], split_true_edge[i], reuse_scope=False, scope=vs)
else:
model = Model(split_image[i], split_flo[i], split_true_edge[i], reuse_scope=True, scope=vs)
loss = model.loss
# Retain the summaries from the final tower.
if i == FLAGS.num_gpus - 1:
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, ns)
eval_model = Model_eval(image_eval, mask_eval, scope=vs)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = train_op.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
itr_placeholders.append(model.iter_num)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Apply the gradients to adjust the shared variables.
apply_gradient_op = train_op.apply_gradients(grads)
# Create a saver.
saver = tf.train.Saver(
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=5)
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries + summaries_cpu)
# Make training session.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False))
summary_writer = tf.summary.FileWriter(
FLAGS.output_dir, graph=sess.graph, flush_secs=10)
if FLAGS.pretrained_model:
saver.restore(sess, FLAGS.pretrained_model)
start_itr = int(FLAGS.pretrained_model.split("/")[-1][5:])
else:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
start_itr = 0
tf.train.start_queue_runners(sess)
# Run training.
for itr in range(start_itr, FLAGS.num_iterations):
# Generate new batch of data.
feed_dict = {x:np.float32(itr) for x in itr_placeholders}
_, summary_str = sess.run([apply_gradient_op, summary_op],
feed_dict)
if (itr) % SAVE_INTERVAL == 2:
orig_image1, flo, true_edge, \
pred_edge = sess.run([model.orig_image1,
model.flo,
model.true_edge,
model.pred_edge],
feed_dict)
if (itr) % (SAVE_INTERVAL*10) == 2:
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model' + str(itr))
plot_flo_edge(orig_image1, flo, true_edge, pred_edge,
output_dir=FLAGS.output_dir, itr=itr)
# eval_summary_str, eval_image, eval_mask_true, eval_mask_pred = sess.run([eval_model.summ_op,
# eval_model.image,
# eval_model.mask_true,
# eval_model.mask_pred])
#
# plot_eval(eval_image, eval_mask_true, eval_mask_pred,
# output_dir=FLAGS.output_dir, itr=itr)
if (itr) % SUMMARY_INTERVAL:
summary_writer.add_summary(summary_str, itr)
# if (itr) % SUMMARY_INTERVAL*2 :
# eval_summary_str = sess.run(eval_model.summ_op)
# summary_writer.add_summary(eval_summary_str, itr)
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model')
tf.logging.info('Training complete')
  tf.logging.flush()
if __name__ == '__main__':
app.run()
| apache-2.0 | -8,993,515,519,394,201,000 | 37.707589 | 156 | 0.61098 | false |
lokokung/Starburst-Front-End-Control-System | core/feanta_server.py | 1 | 8262 | """
STARBURST ACC/FEANTA Middle Server
Author: Lokbondo Kung
Email: [email protected]
"""
import datetime
import socket
import sys
import time
import threading
import gen_fem_sf
import traceback
# Logging information.
TIMESTAMP_FMT = '%Y-%m-%d %H:%M:%S'
LOG_FILE = 'bridge_server.log'
# Define all constants:
# Currently hard-coded, will eventually be read from acc.ini
HOST = ''
HOST_PORT = 5676
ACC_HOSTNAME = 'acc.solar.pvt'
ACC_PORT = 5675
VERSION = 1.2 # Version date: 10/6/2015
# region Class Description
"""
Class: ServerDaemon
Description:
Implementation of a daemon server that is intended to run on
the feanta box in order to process commands from the ACC and
direct and execute the commands to the sub-units connected to
the feanta computer.
Arguments:
pidfile: string designating the .pid file to save the pid for
this daemon process to allow for the process to be stopped
by the stop function or to be stopped manually in Linux.
"""
# endregion
class ServerDaemon():
def __init__(self, pidfile):
self.pidfile_path = pidfile
self.pidfile_timeout = 5
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/null'
self.stderr_path = '/dev/null'
self.workers = {}
self.function_map = {}
self.log_file = LOG_FILE
self.acc_ip = socket.gethostbyname(ACC_HOSTNAME)
# ---------------------------------------------------------------
# BASIC ROUTINES:
# ---------------------------------------------------------------
def __get_timestamp(self):
current_time = time.time()
timestamp = datetime.datetime.fromtimestamp(current_time)
timestamp = timestamp.strftime(TIMESTAMP_FMT)
return timestamp
def __log(self, message):
log_message = self.__get_timestamp() + ': ' + str(message) + '\n'
f = open(self.log_file, "a")
f.write(log_message)
f.close()
print log_message
# ---------------------------------------------------------------
# CORE ROUTINES
# ---------------------------------------------------------------
# region Method Description
"""
Method: link_worker
Description:
This method is used to link a worker extending the i_worker
class to this server so that commands associated with the
i_worker can be executed properly through this server.
Arguments:
worker: the target worker to be linked to this server
"""
# endregion
def link_worker(self, worker):
self.workers[worker.name] = worker
for command in worker.get_command_list():
self.function_map[command] = worker
worker.set_logger(self.__log)
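    # Illustrative usage (sketch, not from the original file): a concrete i_worker
    # subclass, e.g. a hypothetical CryostatWorker, would be registered with
    #     server = ServerDaemon('/tmp/feanta.pid')
    #     server.link_worker(CryostatWorker())
    # after which every command in CryostatWorker.get_command_list() is routed to it.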
# region Method Description
"""
Method: list_workers
Description:
Lists each worker linked to this ServerDaemon.
"""
# endregion
def list_workers(self):
workers_list = ''
for worker in self.workers.keys():
workers_list += worker + '\n'
return workers_list
# region Method Description
"""
Method: set_log_file
Description:
Sets the destination path for the log file. Defaulted to
LOG_FILE.
"""
# endregion
def set_log_file(self, log_file_destination):
self.log_file = log_file_destination
# region Method Description
"""
Method: list_commands
Description:
Lists every command that this server can respond to.
"""
# endregion
def list_commands(self):
return self.function_map.keys()
def make_stateframe_dict(self):
fem_dict = {}
# Handle powerstrip cluster.
worker = self.workers.get('PDU-Worker', None)
if worker is not None:
try:
fem_dict['POWERSTRIP'] = worker.stateframe_query()
except Exception, e:
fem_dict['POWERSTRIP'] = {}
self.__log(traceback.format_exc())
else:
fem_dict['POWERSTRIP'] = {}
# Handle thermal cluster.
worker = self.workers.get('Cryostat-Worker', None)
working_dict = {}
if worker is not None:
try:
working_dict = worker.stateframe_query()
except Exception, s:
self.__log(traceback.format_exc())
worker = self.workers.get('Temp-Worker', None)
if worker is not None:
try:
working_dict['FOCUSBOX'] = worker.stateframe_query()
except Exception, e:
working_dict['FOCUSBOX'] = 0
self.__log(traceback.format_exc())
else:
working_dict['FOCUSBOX'] = 0
fem_dict['THERMAL'] = working_dict
# Handle receiver cluster.
worker = self.workers.get('BB-Worker', None)
working_dict = {}
if worker is not None:
try:
working_dict = worker.stateframe_query()
except Exception, e:
pass
working_dict['LOFREQSTATUS'] = 0
working_dict['HIFREQSTATUS'] = 0
working_dict['NOISESTATUS'] = 0
fem_dict['RECEIVER'] = working_dict
# Handle servo cluster.
worker = self.workers.get('GeoBrick-Worker', None)
if worker is not None:
try:
fem_dict['SERVO'] = worker.stateframe_query()
except Exception, e:
fem_dict['SERVO'] = {}
self.__log(traceback.format_exc())
else:
fem_dict['SERVO'] = {}
# Handle version.
fem_dict['VERSION'] = VERSION
# Handle timestamp
fem_dict['TIMESTAMP'] = time.time() + 2082844800
return {'FEM': fem_dict}
def send_stateframe_dict(self):
try:
fem_dict = self.make_stateframe_dict()
fmt, buf, xml = gen_fem_sf.gen_fem_sf(fem_dict)
packet_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
packet_socket.settimeout(0.3)
packet_socket.connect((self.acc_ip, ACC_PORT))
packet_socket.sendall(buf)
packet_socket.close()
# persec = open('/tmp/persec.txt', 'a')
# persec.write(buf + '\n')
# persec.close()
finally:
threading.Timer(0.3, self.send_stateframe_dict).start()
# region Method Description
"""
Method: run
Description:
Daemon routine for the ServerDaemon between the ACC and the Brick.
"""
# endregion
def run(self):
# Setup listener to this box at HOST_PORT.
acc_listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__log('Attempt to set up listener...')
try:
acc_listener.bind((HOST, HOST_PORT))
except socket.error, msg:
self.__log('Unable to listen at port ' + str(HOST_PORT) +
'. Error Code: ' + str(msg[0]) + '. Message: ' +
str(msg[1]))
sys.exit()
acc_listener.listen(1)
self.__log('Successfully setup listener')
polling_thread = threading.Thread(target=self.send_stateframe_dict)
polling_thread.start()
while True:
# Wait for a connection from ACC.
connection, address = acc_listener.accept()
self.__log('Connection from ' + address[0] +
':' + str(address[1]))
# Read packet sent from ACC, currently capped at 1024 bytes.
acc_command = connection.recv(1024)
self.__log('Command issued from connection: ' + acc_command)
acc_command = acc_command.split()
# Echo command issued back to ACC.
connection.sendall(acc_command[0])
# Verify that the given command exists and execute it with the
# correct worker if it does.
try:
worker = self.function_map[acc_command[0]]
worker.execute(acc_command)
except KeyError:
self.__log('Unrecognized command received: ' +
acc_command[0] + '.')
| mit | -2,742,736,555,021,004,000 | 31.527559 | 78 | 0.549141 | false |
chriswmackey/UWG_Python | tests/test_UWG.py | 1 | 22465 | """Test for uwg.py"""
import os
import pytest
from copy import deepcopy
from .test_base import auto_setup_uwg, set_input_manually
from uwg import SchDef, BEMDef, Building, Element, Material, UWG
from uwg.utilities import is_near_zero
def test_init():
"""Test initialization methods."""
test_dir = os.path.abspath(os.path.dirname(__file__))
param_path = os.path.join(test_dir, 'parameters',
'initialize_singapore.uwg')
epw_path = os.path.join(test_dir, 'epw', 'SGP_Singapore.486980_IWEC.epw')
refBEM, refSch = UWG.load_refDOE()
refBEM[0][2][0].building.shgc = 0.9
ref_bem_vec = [refBEM[0][2][0], refBEM[2][2][0]]
ref_sch_vec = [refSch[0][2][0], refSch[2][2][0]]
# base init
UWG(epw_path)
# from param_file
UWG.from_param_file(param_path, epw_path)
# from args
UWG.from_param_args(bldheight=10.0, blddensity=0.5, vertohor=0.5, zone='1A',
treecover=0.1, grasscover=0.1, epw_path=epw_path)
model = UWG.from_param_args(10.0, 0.5, 0.5, treecover=0.1, grasscover=0.1,
zone='1A', ref_bem_vector=[], ref_sch_vector=[],
epw_path=epw_path)
model.generate()
assert model.ref_bem_vector == []
assert model.ref_sch_vector == []
UWG.from_param_args(10.0, 0.5, 0.5, 0.1, 0.1, '1A',
ref_bem_vector=ref_bem_vec, ref_sch_vector=ref_sch_vec,
epw_path=epw_path)
with pytest.raises(AssertionError):
UWG.from_param_args(10.0, 0.5, 0.5, 0.1, 0.1, '1A',
ref_bem_vector=ref_bem_vec, ref_sch_vector=ref_sch_vec[:1])
with pytest.raises(AssertionError):
UWG.from_param_args(10.0, 0.5, 0.5, 0.1, 0.1, '1A',
ref_bem_vector=None, ref_sch_vector=ref_sch_vec)
with pytest.raises(Exception):
# No epw_path
model = UWG.from_param_args(
bldheight=10.0, blddensity=0.5, vertohor=0.5, grasscover=0.1,
treecover=0.1, zone='1A')
model.generate()
# from dict
data = UWG.from_param_args(
10.0, 0.5, 0.5, 0.1, 0.1, '1A').to_dict(include_refDOE=False)
UWG.from_dict(data)
model1 = UWG.from_param_args(
10.0, 0.5, 0.5, 0.1, 0.1, '1A', ref_bem_vector=ref_bem_vec,
ref_sch_vector=ref_sch_vec)
data = model1.to_dict(include_refDOE=True)
model2 = UWG.from_dict(data, epw_path=epw_path)
model2.generate()
assert model2.ref_bem_vector[0].building.shgc == pytest.approx(
0.9, abs=1e-10)
assert model2.refBEM[0][2][0].building.shgc == pytest.approx(
0.9, abs=1e-10)
def test_dict():
"""Test uwg to/from dict method."""
model1 = auto_setup_uwg()
# Set some optional values
model1.shgc = 0.3
model1.glzr = 0.5
model1.bld = [('outpatient', 'pre80', 0.1),
('outpatient', 'new', 0.9)]
# make dict
uwgdict = model1.to_dict()
# test if dict and from_dict
assert isinstance(uwgdict, dict)
model2 = model1.from_dict(uwgdict)
# Test attributes (including optional)
    assert model1.flr_h == model2.flr_h
    assert model1.blddensity == pytest.approx(model2.blddensity, abs=1e-10)
    assert model1.c_circ == pytest.approx(model2.c_circ, abs=1e-10)
    assert model1.shgc == pytest.approx(model2.shgc, abs=1e-10)
    assert model1.glzr == pytest.approx(model2.glzr, abs=1e-10)
    # bld matrix
    assert model2.bld[0][2] == pytest.approx(0.1, abs=1e-10)
    assert model2.bld[1][2] == pytest.approx(0.9, abs=1e-10)
# Test error
with pytest.raises(AssertionError):
uwgdict['type'] = 'Error'
model1.from_dict(uwgdict)
def test_sch_refDOE():
"""Test uwg from dict method with refDOE override for Schs."""
model1 = auto_setup_uwg()
# Set bld matrix and zone
model1.bld = [('hospital', 'new', 1)]
model1.zone = '1A'
# add schedule to type=2, era=3
testweek = [[0.1 for i in range(24)] for j in range(3)]
refbem = model1.refBEM[0][0][0]
refbem.bldtype, refbem.builtera = 'hospital', 'new'
model1._ref_bem_vector = [refbem]
model1._ref_sch_vector = \
[SchDef(elec=testweek, gas=testweek, light=testweek, occ=testweek, cool=testweek,
heat=testweek, swh=testweek, q_elec=18.9, q_gas=3.2, q_light=18.9,
n_occ=0.12, vent=0.0013, v_swh=0.2846, bldtype='hospital', builtera='new')]
model1.generate() # initialize BEM, Sch objects
# make dict
uwgdict = model1.to_dict(include_refDOE=True)
assert 'ref_sch_vector' in uwgdict
assert len(uwgdict['ref_sch_vector']) == 1
model2 = model1.from_dict(uwgdict, epw_path=model1.epw_path)
model2.generate()
# Check values
assert model2.bld[0][2] == pytest.approx(1, abs=1e-10)
testsch = model2.refSchedule[1][2][0]
for i in range(3):
for j in range(24):
assert testsch.elec[i][j] == pytest.approx(0.1, abs=1e-10)
assert testsch.swh[i][j] == pytest.approx(0.1, abs=1e-10)
# Test adding 1 extra type to bld on second row
model1 = auto_setup_uwg()
# Set bld matrix and zone
model1.bld = [('customwarehouse', 'new', 1)]
model1.zone = '1A'
testweek = [[0.2 for i in range(24)] for j in range(3)]
newsch = SchDef(elec=testweek, gas=testweek, light=testweek, occ=testweek,
cool=testweek, heat=testweek, swh=testweek, q_elec=18.9,
q_gas=3.2, q_light=18.9, n_occ=0.12, vent=0.0013, v_swh=0.2846,
bldtype='customwarehouse', builtera='new')
newbem = _generate_bemdef()
newbem.bldtype = 'customwarehouse'
newbem.builtera = 'new'
model1.ref_bem_vector = [newbem]
model1.ref_sch_vector = [newsch]
uwgdict = model1.to_dict(include_refDOE=True)
model2 = model1.from_dict(uwgdict, epw_path=model1.epw_path)
model2.generate()
# check lengths
assert len(uwgdict['ref_sch_vector']) == 1
assert len(model2.refSchedule) == 17
assert len(model2.bld) == 1
# Check values
testsch = model2.refSchedule[16][2][0]
for i in range(3):
for j in range(24):
assert testsch.elec[i][j] == pytest.approx(0.2, abs=1e-10)
assert testsch.swh[i][j] == pytest.approx(0.2, abs=1e-10)
def test_bem_refDOE():
"""Test uwg from dict method with refDOE override for BEMs."""
model1 = auto_setup_uwg()
# Set bld matrix and zone
model1.bld = [('hospital', 'new', 1)]
model1.zone = '1A'
# add schedule to type=1, era=2
bem = _generate_bemdef()
bem.bldtype = 'hospital'
bem.builtera = 'new'
bem.building.cop = 4000.0
bem.roof.emissivity = 0.001
model1.ref_bem_vector = [bem]
model1.ref_sch_vector = [model1.refSchedule[1][2][0]]
# make dict
uwgdict = model1.to_dict(include_refDOE=True)
assert 'ref_bem_vector' in uwgdict
assert len(uwgdict['ref_bem_vector']) == 1
model2 = model1.from_dict(uwgdict, epw_path=model1.epw_path)
# Test default values being overwritten with compute_BEM
assert model2.refBEM[1][2][0].frac == pytest.approx(0.0, abs=1e-10)
model2.generate()
# Object will be linked therefore modified
assert model2.refBEM[1][2][0].frac == pytest.approx(1, abs=1e-10)
# Check values
assert len(model2.BEM) == 1
testbem = model2.refBEM[1][2][0]
assert testbem.building.cop == pytest.approx(4000.0, 1e-10)
assert testbem.roof.emissivity == pytest.approx(0.001, abs=1e-10)
def test_customize_reference_data():
"""Test adding reference DOE data to UWG."""
model = auto_setup_uwg()
model.zone = '7'
zi = 14
# make new sched and unrealistic values
testweek = [[2000.0 for i in range(24)] for j in range(3)]
newsch1 = SchDef(elec=testweek, gas=testweek, light=testweek, occ=testweek,
cool=testweek, heat=testweek, swh=testweek, q_elec=18.9,
q_gas=3.2, q_light=18.9, n_occ=0.12, vent=0.0013, v_swh=0.2846,
bldtype='midriseapartment', builtera='pre80')
testweek = [[1000.0 for i in range(24)] for j in range(3)]
newsch2 = SchDef(elec=testweek, gas=testweek, light=testweek, occ=testweek,
cool=testweek, heat=testweek, swh=testweek, q_elec=18.9,
q_gas=3.2, q_light=18.9, n_occ=0.12, vent=0.0013, v_swh=0.2846,
bldtype='customtype', builtera='new')
# make new blds and add unrealistic values
bem1 = _generate_bemdef()
bem1.bldtype = 'midriseapartment'
bem1.builtera = 'pre80'
    bem1.frac = 0.314  # will be overwritten
bem1.building.cop = 3000.0
bem1.roof.emissivity = 0.0
bem2 = deepcopy(_generate_bemdef())
bem2.bldtype = 'customtype'
bem2.builtera = 'new'
    bem2.frac = 0.714  # will be overwritten
bem2.building.cop = 4000.0
bem2.roof.emissivity = 0.001
# test default lengths
assert len(model.refSchedule) == 16
assert len(model.refBEM) == 16
for day in model.refSchedule[5][0][zi].heat:
for hr in day:
assert not is_near_zero(hr - 2000.0, 1e-10)
assert model.refBEM[5][0][zi].frac == pytest.approx(0, 1e-10)
assert not is_near_zero(
model.refBEM[5][0][zi].building.cop - 3000.0, 1e-10)
assert not is_near_zero(
model.refBEM[5][0][zi].roof.emissivity - 0.0, 1e-10)
# run method
ref_sch_vec = [newsch1, newsch2]
ref_bem_vec = [bem1, bem2]
# set bld matrix and zone
model.bld = [('midriseapartment', 'pre80', 0.5), # test insertion
('customtype', 'new', 0.5)] # test insertion
model.ref_bem_vector, model.ref_sch_vector = \
model._check_reference_data(ref_bem_vec, ref_sch_vec)
with pytest.raises(Exception):
model.ref_bem_vector = ref_bem_vec
with pytest.raises(Exception):
model.ref_sch_vector = ref_sch_vec
model._customize_reference_data()
# Test customized schedules
assert len(model.refSchedule) == 17
for day in model.refSchedule[5][0][zi].heat:
for hr in day:
assert is_near_zero(hr - 2000.0, 1e-10)
for day in model.refSchedule[16][2][zi].heat:
for hr in day:
assert is_near_zero(hr - 1000.0, 1e-10)
# Test customised bemdefs
assert len(model.refBEM) == 17
assert is_near_zero(model.refBEM[5][0][zi].frac - 0.314, 1e-10)
assert is_near_zero(model.refBEM[5][0][zi].building.cop - 3000.0, 1e-10)
assert is_near_zero(model.refBEM[5][0][zi].roof.emissivity - 0.0, 1e-10)
assert is_near_zero(model.refBEM[16][2][zi].frac - 0.714, 1e-10)
assert is_near_zero(model.refBEM[16][2][zi].building.cop - 4000.0, 1e-10)
assert is_near_zero(model.refBEM[16][2][zi].roof.emissivity - 0.001, 1e-10)
def test_read_epw():
"""Test read epw"""
model = auto_setup_uwg()
model._read_epw()
# test header
assert model._header[0][0] == 'LOCATION'
assert model._header[0][1] == 'SINGAPORE'
assert model.lat == pytest.approx(1.37, abs=1e-3)
assert model.lon == pytest.approx(103.98, abs=1e-3)
assert model.gmt == pytest.approx(8, abs=1e-3)
# test soil data
assert model.nSoil == pytest.approx(3, abs=1e-2)
# test soil depths
assert model.depth_soil[0][0] == pytest.approx(0.5, abs=1e-3)
assert model.depth_soil[1][0] == pytest.approx(2., abs=1e-3)
assert model.depth_soil[2][0] == pytest.approx(4., abs=1e-3)
# test soil temps over 12 months
assert model.Tsoil[0][0] == pytest.approx(27.55+273.15, abs=1e-3)
assert model.Tsoil[1][2] == pytest.approx(28.01+273.15, abs=1e-3)
assert model.Tsoil[2][11] == pytest.approx(27.07+273.15, abs=1e-3)
# test time step in weather file
assert model.epwinput[0][0] == '1989'
assert float(model.epwinput[3][6]) == pytest.approx(24.1, abs=1e-3)
def test_read_input():
"""Test read input."""
model = auto_setup_uwg()
model.generate()
# test uwg param dictionary first and last
assert 'bldheight' in model._init_param_dict
assert 'h_obs' in model._init_param_dict
assert model._init_param_dict['bldheight'] == pytest.approx(10., abs=1e-6)
assert model._init_param_dict['vegend'] == pytest.approx(10, abs=1e-6)
assert model._init_param_dict['albroof'] is None
assert model._init_param_dict['h_ubl1'] == pytest.approx(1000., abs=1e-6)
assert model._init_param_dict['h_ref'] == pytest.approx(150., abs=1e-6)
# test SchTraffic schedule
# first
assert model._init_param_dict['schtraffic'][0][0] == pytest.approx(
0.2, abs=1e-6)
# last
assert model._init_param_dict['schtraffic'][2][23] == pytest.approx(
0.2, abs=1e-6)
assert model._init_param_dict['schtraffic'][0][19] == pytest.approx(
0.8, abs=1e-6)
assert model._init_param_dict['schtraffic'][1][21] == pytest.approx(
0.3, abs=1e-6)
assert model._init_param_dict['schtraffic'][2][6] == pytest.approx(
0.4, abs=1e-6)
# test bld fraction list
assert len(model._init_param_dict['bld']) == 2
assert model._init_param_dict['bld'][0][2] == pytest.approx(0.4, abs=1e-6)
assert model._init_param_dict['bld'][1][2] == pytest.approx(0.6, abs=1e-6)
# test BEMs
assert len(model.BEM) == 2
# test BEM office (BLD4 in DOE)
assert model.BEM[0].bldtype == 'largeoffice'
assert model.BEM[0].zonetype == '1A'
assert model.BEM[0].builtera == 'pst80'
assert model.BEM[0].frac == 0.4
# test BEM apartment
assert model.BEM[1].bldtype == 'midriseapartment'
assert model.BEM[1].zonetype == '1A'
assert model.BEM[1].builtera == 'pst80'
assert model.BEM[1].frac == 0.6
# Check that schedules are called correctly
# 9am on Weekday for Office
assert model.Sch[0].light[0][8] == pytest.approx(0.9, abs=1e-6)
    # 8am on Weekday for Office
assert model.Sch[0].light[0][7] == pytest.approx(0.3, abs=1e-6)
# 12 noon on Weekend for apt
assert model.Sch[1].occ[1][11] == pytest.approx(0.25, abs=1e-6)
# Check that soil ground depth is set correctly
assert model.depth_soil[model._soilindex1][0] == pytest.approx(
0.5, abs=1e-6)
assert model.depth_soil[model._soilindex2][0] == pytest.approx(
0.5, abs=1e-6)
# Check the road layer splitting
assert len(model.road.layer_thickness_lst) == pytest.approx(11., abs=1e-15)
assert model.road.layer_thickness_lst[0] == pytest.approx(0.05, abs=1e-15)
# Check the road layer splitting for rural
assert len(model.rural.layer_thickness_lst) == pytest.approx(
11., abs=1e-15)
assert model.rural.layer_thickness_lst[0] == pytest.approx(0.05, abs=1e-6)
def test_optional_blank_parameters():
model = auto_setup_uwg(param_path=None)
model = set_input_manually(model)
model.generate()
assert model.BEM[0].building.glazing_ratio == pytest.approx(
0.38, abs=1e-15)
assert model.BEM[0].roof.albedo == pytest.approx(0.2, abs=1e-15)
assert model.BEM[0].roof.vegcoverage == pytest.approx(0.0, abs=1e-15)
assert model.BEM[1].roof.albedo == pytest.approx(0.2, abs=1e-15)
assert model.BEM[1].building.glazing_ratio == pytest.approx(
0.1499, abs=1e-15)
assert model.BEM[1].roof.vegcoverage == pytest.approx(0.0, abs=1e-15)
def test_optional_inputted_parameters():
model = auto_setup_uwg(param_path=None)
model = set_input_manually(model)
# test __repr__
model.generate()
model.__repr__()
# Test setting values
with pytest.raises(AssertionError):
model.albroad = 5
# Test setting values
with pytest.raises(AssertionError):
model.glzr = 5
# Test setting values
with pytest.raises(AssertionError):
model.month = 50
# Set optional parameters
model.albroof = .5
model.vegroof = .1
model.glzr = .5
model.albwall = 0.91
model.shgc = 0.65
model.flr_h = 4.5
# From blank inputs will be from DOE
model.generate()
assert model.BEM[0].building.glazing_ratio == pytest.approx(0.5, abs=1e-15)
assert model.BEM[0].roof.albedo == pytest.approx(0.5, abs=1e-15)
assert model.BEM[0].roof.vegcoverage == pytest.approx(0.1, abs=1e-15)
assert model.BEM[1].building.glazing_ratio == pytest.approx(0.5, abs=1e-15)
assert model.BEM[1].roof.albedo == pytest.approx(0.5, abs=1e-15)
assert model.BEM[1].roof.vegcoverage == pytest.approx(0.1, abs=1e-15)
assert model.BEM[0].wall.albedo == pytest.approx(0.91, abs=1e-15)
assert model.BEM[1].building.shgc == pytest.approx(0.65, abs=1e-15)
assert model.BEM[0].building.floor_height == pytest.approx(4.5, abs=1e-15)
def test_procMat():
"""
    Test different max/min layer depths that generate different road layer
thicknesses (to account for too deep elements with inaccurate heat transfer).
"""
model = auto_setup_uwg()
model.generate()
# test a 0.5m road split into 10 slices of 0.05m
# base case; min=0.01, max=0.05, stays the same
roadMat, newthickness = model._procmat(model.road, 0.05, 0.01)
assert len(roadMat) == pytest.approx(11, abs=1e-6)
assert len(newthickness) == pytest.approx(11, abs=1e-6)
assert sum(newthickness) == pytest.approx(0.05*11, abs=1e-6)
# modify to one layer for tests
model.road.layer_thickness_lst = [0.05]
model.road.layerThermalCond = model.road.layerThermalCond[:1]
model.road.layerVolHeat = model.road.layerVolHeat[:1]
# 0.05 layer, will split in two
roadMat, newthickness = model._procmat(model.road, 0.05, 0.01)
assert len(roadMat) == pytest.approx(2, abs=1e-6)
assert len(newthickness) == pytest.approx(2, abs=1e-6)
assert sum(newthickness) == pytest.approx(0.025*2, abs=1e-6)
# 0.015 layer, will split in min thickness in two
model.road.layer_thickness_lst = [0.015]
roadMat, newthickness = model._procmat(model.road, 0.05, 0.01)
assert len(roadMat) == pytest.approx(2, abs=1e-6)
assert len(newthickness) == pytest.approx(2, abs=1e-6)
assert sum(newthickness) == pytest.approx(0.005*2, abs=1e-6)
# 0.12 layer, will split into 3 layers b/c > max_thickness
model.road.layer_thickness_lst = [0.12]
roadMat, newthickness = model._procmat(model.road, 0.05, 0.01)
assert len(roadMat) == pytest.approx(3, abs=1e-6)
assert len(newthickness) == pytest.approx(3, abs=1e-6)
assert sum(newthickness) == pytest.approx(0.04*3, abs=1e-6)
def test_hvac_autosize():
"""Test hvace autosize"""
model = auto_setup_uwg()
model.generate()
# Test setting values
with pytest.raises(AssertionError):
model.autosize = [1, 2, 3]
assert model.autosize is False
assert model.autosize == 0
assert len(model.BEM) == pytest.approx(2, abs=1e-6)
# coolCap and heatCap don't retain high accuracy when extracted from the
# DOE reference csv, so we will reduce the tolerance here
assert model.BEM[0].building.coolcap == \
pytest.approx((3525.66904 * 1000.0) / 46320.0, abs=1e-3)
assert model.BEM[0].building.heat_cap == \
pytest.approx((2875.97378 * 1000.0) / 46320.0, abs=1e-3)
assert model.BEM[1].building.coolcap \
== pytest.approx((252.20895 * 1000.0) / 3135., abs=1e-2)
assert model.BEM[1].building.heat_cap \
== pytest.approx((132.396 * 1000.0) / 3135., abs=1e-2)
model.autosize = True
assert model.autosize == 1
def test_simulate():
"""Test UWG simulation."""
model = auto_setup_uwg()
model.generate()
model.simulate()
model.write_epw()
# Parameters from initialize.uwg
# Month = 1; % starting month (1-12)
# Day = 1; % starting day (1-31)
# nDay = 31; % number of days
# dtSim = 300; % simulation time step (s)
# dtWeather = 3600; % weather time step (s)
# total hours in simulation
assert model.N == pytest.approx(744., abs=1e-6)
assert model.ph == pytest.approx(
0.083333, abs=1e-6) # dt (sim time step) hours
# test the weather data time series is equal to time step
assert len(model.forcIP.infra) == \
pytest.approx((model.simTime.nt - 1) / 12., abs=1e-3)
# check that simulation time is happening every 5 minutes 8928
assert model.simTime.nt-1 == pytest.approx(31*24*3600/300., abs=1e-3)
# check that weather step time is happening every 1 hour = 744
assert len(model.forcIP.dif) == pytest.approx(31 * 24, abs=1e-3)
# check that final day of timestep is at correct dayType
assert model.dayType == pytest.approx(1., abs=1e-3)
assert model.schtraffic[model.dayType - 1][model.simTime.hourDay] == \
pytest.approx(0.2, abs=1e-6)
def _generate_bemdef():
"""Create BEMDef: LargeOffce, Pst80, Zone 1A (Miami)."""
# Material: (thermalCond, volHeat = specific heat * density)
concrete = Material(1.311, 836.8 * 2240, 'Concrete')
gypsum = Material(0.16, 830.0 * 784.9, 'Gypsum')
stucco = Material(0.6918, 837.0 * 1858.0, 'Stucco')
insulation = Material(0.049, 836.8 * 265.0, "Insulation")
# Mass wall for LargeOffce, Pst80, Zone 1A (Miami)
thicknessLst = [0.0254, 0.0508, 0.0508, 0.0508, 0.0508, 0.0127]
materialLst = [stucco, concrete, concrete, concrete, concrete, gypsum]
wall = Element(albedo=0.08, emissivity=0.92, layer_thickness_lst=thicknessLst,
material_lst=materialLst, vegcoverage=0, t_init=293,
horizontal=False, name='MassWall')
# IEAD roof
thicknessLst = [0.058, 0.058]
materialLst = [insulation, insulation]
roof = Element(albedo=0.2, emissivity=0.93, layer_thickness_lst=thicknessLst,
material_lst=materialLst, vegcoverage=0.5, t_init=293,
horizontal=True, name='IEAD')
# Mass floor
thicknessLst = [0.054, 0.054]
materialLst = [concrete, concrete]
floor = Element(albedo=0.2, emissivity=0.9, layer_thickness_lst=thicknessLst,
material_lst=materialLst, vegcoverage=0.0, t_init=293,
horizontal=True, name='MassFloor')
bld = Building(floor_height=3.5, int_heat_night=1, int_heat_day=1, int_heat_frad=0.1,
int_heat_flat=0.1, infil=0.26, vent=0.0005, glazing_ratio=0.4,
u_value=5.8, shgc=0.2, condtype='AIR', cop=5.2, coolcap=76,
heateff=0.7, initial_temp=293)
return BEMDef(bld, floor, wall, roof, bldtype='fullservicerestaurant', builtera='pst80')
| gpl-3.0 | 7,150,451,097,433,163,000 | 36.25539 | 92 | 0.625061 | false |
shendri4/devil_wgs | BQSR_perInd_Devils.py | 1 | 3647 | #!/usr/bin/env python
#import argparse
#from glob import glob
#-s test_samples.txt
#-b /mnt/lfs2/hend6746/devils/reference/sarHar1.fa
#-k /mnt/lfs2/hend6746/taz/filtered_plink_files/export_data_150907/seventy.1-2.nodoubletons.noparalogs.noX.plink.oneperlocus.vcf
from os.path import join as jp
from os.path import abspath
import os
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', "--samples", help="Samples.txt file with sample ID.", required=True)
parser.add_argument('-b', "--bwaindex", help="Path to bwa index file.", required=True)
parser.add_argument('-k', "--knownsites", help="Path and fileName of filteredSNP.vcf.", required=True)
args = parser.parse_args()
#args = parser.parse_args('-s samples.txt -r /mnt/lfs2/hend6746/fox_cancer/0rawdata_test -b /mnt/lfs2/hend6746/wolves/reference/canfam31/canfam31.fa'.split())
VERBOSE=False
#Function definitions:
def log(txt, out):
if VERBOSE:
print(txt)
out.write(txt+'\n')
out.flush()
## Read in samples and put them in a list:
samples = []
for l in open(args.samples):
if len(l) > 1:
samples.append(l.split('/')[-1].replace('.fastq.1.gz', '').strip())
# Setup folders and paths variables:
bamFolder = abspath('02-Mapped')
variantFolder = abspath('03-Calls')
PBS_scripts = abspath('BQSR_PBS_scripts')
#rawdataDir = abspath(args.rawdata)
bwaIndex = abspath(args.bwaindex)
knownSites = abspath(args.knownsites)
gatkCall = 'java -jar /opt/modules/biology/gatk/3.5/bin/GenomeAnalysisTK.jar -R %s' % bwaIndex
os.system('mkdir -p %s' % bamFolder)
os.system('mkdir -p %s' % variantFolder)
os.system('mkdir -p %s' % PBS_scripts)
##### Run pipeline ###
for sample in samples:
print "Processing", sample, "....."
# Set up files:
logFile = jp(bamFolder, sample + '_BQSR.log')
logCommands = open(jp(PBS_scripts, sample + '_BQSR_commands.sh'), 'w')
#Setup for qsub
log('#!/bin/bash', logCommands)
log('#PBS -N %s' % sample, logCommands)
log('#PBS -j oe', logCommands)
log('#PBS -o %s_job.log' % sample, logCommands)
log('#PBS -m abe', logCommands)
log('#PBS -M [email protected]', logCommands)
log('#PBS -q short', logCommands)
log('#PBS -l mem=100gb', logCommands)
log(". /usr/modules/init/bash", logCommands)
log("module load python/2.7.10", logCommands)
log("module load grc", logCommands)
####################
# BaseQualityRecalibration
# Step 1: First run of BQSR: BaseRecalibrator
####################
cmd = ' '.join([gatkCall, ' -nct 24 ',
' -T BaseRecalibrator ', ' -I ' + jp(bamFolder, sample) + '_markdup.bam', ' -knownSites ' + knownSites,
' -o ' + jp(bamFolder, sample) + '_BQSR.table', '>>', logFile, '2>&1'])
log(cmd, logCommands)
####################
# BaseQualityRecalibration
# Step 2: BaseRecalibrator on recalibrated files
####################
cmd = ' '.join([gatkCall, ' -nct 24 ',
' -T BaseRecalibrator ',
' -I ' + jp(bamFolder, sample) + '_markdup.bam',
' -knownSites ' + knownSites,
' -BQSR ' + jp(bamFolder, sample) + '_BQSR.table'
' -o ' + jp(bamFolder, sample) + '_BQSR_FIXED.table', '>>', logFile, '2>&1'])
log(cmd, logCommands)
####################
# BaseQualityRecalibration
# Step 3: PrintReads
# Apply recalibration table to original bam file
####################
cmd = ' '.join([gatkCall, ' -nct 24 ',
' -T PrintReads ',
' -I ' + jp(bamFolder, sample) + '_markdup.bam',
' -BQSR ' + jp(bamFolder, sample) + '_BQSR_FIXED.table'
' -o ' + jp(bamFolder, sample) + '_markdup_BQSR_FIXED.bam', '>>', logFile, '2>&1'])
log(cmd, logCommands)
logCommands.close()
| apache-2.0 | 2,987,812,366,922,128,000 | 33.733333 | 158 | 0.635043 | false |
vesellov/bitdust.devel | system/bpio.py | 1 | 34436 | #!/usr/bin/python
# bpio.py
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (bpio.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software. If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at [email protected]
#
#
#
#
"""
.. module:: bpio.
This module is for simple BitDust routines that do not require importing any of our code.:
- print logs
- file system IO operations
- pack/unpack lists and dictionaries into strings
- some methods to operate with file system paths
- list Linux mount points and Windows drives
- methods to manage system processes
The most used method here is ``log``, which prints a log string.
TODO: need to do some refactoring here
"""
#------------------------------------------------------------------------------
from __future__ import absolute_import
from io import open
#------------------------------------------------------------------------------
import os
import sys
import imp
import platform
import glob
import re
#------------------------------------------------------------------------------
from lib import strng
from logs import lg
from system import local_fs
#------------------------------------------------------------------------------
LocaleInstalled = False
PlatformInfo = None
X11isRunning = None
#------------------------------------------------------------------------------
def init():
"""
This method must be called firstly, before any logs will be printed.
This installs a system locale, so all output messages will have a
correct encoding.
"""
InstallLocale()
if Linux() or Mac():
lg.setup_unbuffered_stdout()
def shutdown():
"""
This is the last method to be invoked by the program before main process
will stop.
"""
lg.restore_original_stdout()
lg.close_log_file()
lg.disable_logs()
def InstallLocale():
"""
Here is a small trick to refresh current default encoding.
"""
global LocaleInstalled
if LocaleInstalled:
return False
try:
import sys
reload(sys)
if Windows():
if hasattr(sys, "setdefaultencoding"):
import locale
denc = locale.getpreferredencoding()
if not denc:
sys.setdefaultencoding('UTF8')
else:
sys.setdefaultencoding(denc)
else:
sys.setdefaultencoding('UTF8')
LocaleInstalled = True
except:
pass
return LocaleInstalled
def ostype():
"""
Return current platform: "Linux", "Windows", "Darwin".
    MacOS is not supported yet. Do not print anything inside ostype() because
    it is used in bppipe.py and its stdout goes into the tar file.
"""
global PlatformInfo
if PlatformInfo is None:
PlatformInfo = platform.uname()
return PlatformInfo[0]
def osversion():
"""
Return something like: "2.6.32.9-rscloud" or "XP".
"""
global PlatformInfo
if PlatformInfo is None:
PlatformInfo = platform.uname()
return PlatformInfo[2]
def osinfo():
"""
Return full OS info, like: "Linux-2.6.32.9-rscloud-x86_64-with-
Ubuntu-12.04-precise" or "Windows-XP-5.1.2600-SP3".
"""
return str(platform.platform()).strip()
def osinfofull():
"""
Return detailed system info.
"""
import pprint
o = ''
o += '=====================================================\n'
o += '=====================================================\n'
o += '=====================================================\n'
o += 'platform.uname(): ' + str(platform.uname()) + '\n'
try:
o += '__file__: ' + str(__file__) + '\n'
except:
o += 'variable __file__ is not defined\n'
o += 'sys.executable: ' + sys.executable + '\n'
o += 'os.path.abspath("."): ' + os.path.abspath('.') + '\n'
o += 'os.path.abspath(sys.argv[0]): ' + os.path.abspath(sys.argv[0]) + '\n'
o += 'os.path.expanduser("~"): ' + os.path.expanduser('~') + '\n'
o += 'sys.argv: ' + pprint.pformat(sys.argv) + '\n'
o += 'sys.path:\n' + pprint.pformat(sys.path) + '\n'
o += 'os.environ:\n' + pprint.pformat(list(os.environ.items())) + '\n'
o += '=====================================================\n'
o += '=====================================================\n'
o += '=====================================================\n'
return o
def windows_version():
"""
Useful to detect current Windows version: XP, Vista, 7 or 8.
"""
if getattr(sys, 'getwindowsversion', None) is not None:
return sys.getwindowsversion()[0]
return 0
def Linux():
"""
Return True if current platform is Linux.
"""
return ostype() == "Linux"
def Windows():
"""
Return True if current platform is Windows.
"""
return ostype() == "Windows"
def Mac():
"""
Return True if current platform is Mac.
"""
return ostype() == "Darwin"
def isFrozen():
"""
Return True if BitDust is running from exe, not from sources.
"""
return main_is_frozen()
def isConsoled():
"""
Return True if output can be sent to console.
"""
if getExecutableFilename().count('pythonw.exe'):
return False
if not sys.stdout:
return False
return True
#-------------------------------------------------------------------------------
def list_dir_safe(dirpath):
"""
A safe wrapper around built-in ``os.listdir()`` method.
"""
try:
return os.listdir(dirpath)
except:
return []
def list_dir_recursive(dirpath):
"""
Recursively scan files and folders under ``dirpath`` and return them in the
list.
"""
r = []
for name in os.listdir(dirpath):
full_name = os.path.join(dirpath, name)
if os.path.isdir(full_name):
r.extend(list_dir_recursive(full_name))
else:
r.append(full_name)
return r
def traverse_dir_recursive(callback, basepath, relpath=''):
"""
Call ``callback`` method for every file and folder under ``basepath``.
    If the ``callback`` method returns False the traverse process will not
    go deeper. Useful to count the size of the whole folder.
"""
for name in os.listdir(basepath):
realpath = os.path.join(basepath, name)
subpath = name if relpath == '' else relpath + '/' + name
go_down = callback(realpath, subpath, name)
if os.path.isdir(realpath) and go_down:
traverse_dir_recursive(callback, realpath, subpath)
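# Illustrative usage (sketch, not part of the original module): a callback that sums
# file sizes and always descends into sub-folders could look like:
#     total = [0]
#     def _cb(realpath, subpath, name):
#         if os.path.isfile(realpath):
#             total[0] += os.path.getsize(realpath)
#         return True
#     traverse_dir_recursive(_cb, '/tmp/some_dir')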
def rmdir_recursive(dirpath, ignore_errors=False, pre_callback=None):
"""
Remove a directory, and all its contents if it is not already empty.
http://mail.python.org/pipermail/python-
list/2000-December/060960.html If ``ignore_errors`` is True process
will continue even if some errors happens. Method ``pre_callback``
can be used to decide before remove the file.
"""
for name in os.listdir(dirpath):
full_name = os.path.join(dirpath, name)
# on Windows, if we don't have write permission we can't remove
# the file/directory either, so turn that on
if not os.access(full_name, os.W_OK):
try:
os.chmod(full_name, 0o600)
except:
continue
if os.path.isdir(full_name):
rmdir_recursive(full_name, ignore_errors, pre_callback)
else:
if pre_callback:
if not pre_callback(full_name):
continue
if os.path.isfile(full_name):
if not ignore_errors:
os.remove(full_name)
else:
try:
os.remove(full_name)
except:
lg.out(6, 'bpio.rmdir_recursive can not remove file ' + full_name)
continue
if pre_callback:
if not pre_callback(dirpath):
return
if not ignore_errors:
os.rmdir(dirpath)
else:
try:
os.rmdir(dirpath)
except:
lg.out(6, 'bpio.rmdir_recursive can not remove dir ' + dirpath)
def getDirectorySize(directory, include_subfolders=True):
"""
Platform dependent way to calculate folder size.
"""
if Windows():
import win32file
import win32con
import pywintypes
DIR_EXCLUDES = set(['.', '..'])
MASK = win32con.FILE_ATTRIBUTE_DIRECTORY | win32con.FILE_ATTRIBUTE_SYSTEM
REQUIRED = win32con.FILE_ATTRIBUTE_DIRECTORY
FindFilesW = win32file.FindFilesW
def _get_dir_size(path):
total_size = 0
try:
items = FindFilesW(path + r'\*')
except pywintypes.error as ex:
return total_size
for item in items:
total_size += item[5]
if item[0] & MASK == REQUIRED and include_subfolders:
name = item[8]
if name not in DIR_EXCLUDES:
total_size += _get_dir_size(path + '\\' + name)
return total_size
return _get_dir_size(directory)
dir_size = 0
if not include_subfolders:
for filename in os.listdir(directory):
filepath = os.path.abspath(os.path.join(directory, filename))
if os.path.isfile(filepath):
try:
dir_size += os.path.getsize(filepath)
except:
pass
else:
for (path, dirs, files) in os.walk(directory):
for file in files:
filename = os.path.join(path, file)
if os.path.isfile(filename):
try:
dir_size += os.path.getsize(filename)
except:
pass
return dir_size
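# Illustrative usage sketch (not part of the original module); the folder path
# below is hypothetical. getDirectorySize() returns the total size in bytes of
# the files it finds.
#
#     total_bytes = getDirectorySize('/tmp/some_folder', include_subfolders=True)
#     total_mb = total_bytes / (1024 * 1024)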
#-------------------------------------------------------------------------------
def WriteBinaryFile(filename, data):
return local_fs.WriteBinaryFile(filename=filename, data=data)
def ReadBinaryFile(filename, decode_encoding=None):
return local_fs.ReadBinaryFile(filename=filename, decode_encoding=decode_encoding)
def WriteTextFile(filepath, data):
return local_fs.WriteTextFile(filepath=filepath, data=data)
def ReadTextFile(filename):
return local_fs.ReadTextFile(filename=filename)
#-------------------------------------------------------------------------------
def _pack_list(lst):
"""
    The core method, converts a list of strings to one big string.
    Every line in the string will store a single item from the list. The
    first line will keep the number of items. So items in the list should
    be strings and must not contain "\n". This is useful to store a list
    of user IDs in a local file.
"""
return str(len(lst)) + u'\n' + u'\n'.join(lst)
def _unpack_list(src):
"""
    The core method, reads a list from a string.
    Return a tuple: (resulting list, list with remaining lines or None).
    The first line of ``src`` should contain the number of items in the list.
"""
if not src.strip():
return list(), None
words = src.splitlines()
if len(words) == 0:
return list(), None
try:
length = int(words[0])
except:
return words, None
res = words[1:]
if len(res) < length:
res += [u''] * (length - len(res))
elif len(res) > length:
return res[:length], res[length:]
return res, None
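# Illustrative round-trip sketch (not part of the original module) showing how
# _pack_list() and _unpack_list() relate: the first line of the packed string
# stores the item count, the following lines store the items themselves.
#
#     packed = _pack_list([u'alice', u'bob'])    # -> u'2\nalice\nbob'
#     items, rest = _unpack_list(packed)         # -> ([u'alice', u'bob'], None)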
def _read_list(path):
"""
Read list from file on disk.
"""
src = ReadTextFile(path)
if src is None:
return None
return _unpack_list(src)[0]
def _write_list(path, lst):
"""
Write a list to the local file.
"""
return WriteTextFile(path, _pack_list(lst))
def _pack_dict(dictionary, sort=False):
"""
    The core method, converts a dictionary to a string.
    Every line in the resulting string will contain a key/value pair,
    separated with a single space. So keys must not contain spaces.
    Values must not contain new lines. If ``sort`` is True the resulting
    string will be sorted by keys.
"""
if sort:
seq = sorted(dictionary.keys())
else:
seq = list(dictionary.keys())
return u'\n'.join([u'%s %s' % (k, strng.to_text(str(dictionary[k]))) for k in seq])
def _unpack_dict_from_list(lines):
"""
Read dictionary from list, every item in the list is a string with (key,
value) pair, separated with space.
"""
dct = {}
for line in lines:
words = line.split(u' ')
if len(words) < 2:
continue
dct[words[0]] = u' '.join(words[1:])
return dct
def _unpack_dict(src):
"""
The core method, creates dictionary from string.
"""
lines = strng.to_text(src).split(u'\n')
return _unpack_dict_from_list(lines)
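# Illustrative round-trip sketch (not part of the original module) for
# _pack_dict() and _unpack_dict(); the key/value below are hypothetical.
# Keys must not contain spaces and values must not contain new lines.
#
#     packed = _pack_dict({'idurl': 'http://example.com/alice.xml'}, sort=True)
#     # -> u'idurl http://example.com/alice.xml'
#     restored = _unpack_dict(packed)
#     # -> {u'idurl': u'http://example.com/alice.xml'}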
def _read_dict(path, default=None):
"""
Read dictionary from local file.
    If the file does not exist or there is no read access, returns the ``default`` value.
"""
src = ReadTextFile(path)
if src is None:
return default
return _unpack_dict(src.strip())
def _write_dict(path, dictionary, sort=False):
"""
Write dictionary to the file.
"""
data = _pack_dict(dictionary, sort)
return WriteTextFile(path, data)
def _dir_exist(path):
"""
Just calls os.path.isdir() method.
"""
return os.path.isdir(path)
def _dir_make(path):
"""
Creates a new folder on disk, call built-in os.mkdir() method.
Set access mode to 0777.
"""
os.mkdir(path, 0o777)
def _dirs_make(path):
"""
Create a new folder and all sub dirs, call built-in os.makedirs() method.
Set access mode to 0777.
"""
os.makedirs(path, 0o777)
def _dir_remove(path):
"""
Remove directory recursively.
"""
rmdir_recursive(path)
#------------------------------------------------------------------------------
def backup_and_remove(path):
"""
Backup and remove the file.
    The backed up file will have ".backup" at the end. In fact it just tries
    to rename the original file, but also does some checking first. If a
    file with ".backup" already exists it will try to remove it first.
"""
bkpath = path + '.backup'
if not os.path.exists(path):
return
if os.path.exists(bkpath):
try:
os.remove(bkpath)
except:
lg.out(1, 'bpio.backup_and_remove ERROR can not remove file ' + bkpath)
lg.exc()
try:
os.rename(path, bkpath)
except:
lg.out(1, 'bpio.backup_and_remove ERROR can not rename file %s to %s' % (path, bkpath))
lg.exc()
if os.path.exists(path):
try:
os.remove(path)
except:
lg.out(1, 'bpio.backup_and_remove ERROR can not remove file ' + path)
lg.exc()
def restore_and_remove(path, overwrite_existing=False):
"""
Restore file and remove the backed up copy.
Just renames the file with ".backup" at the end to its 'true' name.
    This is the reverse of ``backup_and_remove``.
"""
bkpath = path + '.backup'
if not os.path.exists(bkpath):
return
if os.path.exists(path):
if not overwrite_existing:
return
try:
os.remove(path)
except:
lg.out(1, 'bpio.restore_and_remove ERROR can not remove file ' + path)
lg.exc()
try:
os.rename(bkpath, path)
except:
lg.out(1, 'bpio.restore_and_remove ERROR can not rename file %s to %s' % (path, bkpath))
lg.exc()
def remove_backuped_file(path):
"""
Tries to remove the file with ".backup" at the end.
"""
bkpath = path + '.backup'
if not os.path.exists(bkpath):
return
try:
os.remove(bkpath)
except:
lg.out(1, 'bpio.remove_backuped_file ERROR can not remove file ' + bkpath)
lg.exc()
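# Illustrative sketch (not part of the original module) of a typical safe-rewrite
# flow built from the three helpers above; the file path is hypothetical.
#
#     backup_and_remove('/tmp/settings.json')            # settings.json -> settings.json.backup
#     ok = WriteTextFile('/tmp/settings.json', new_data)  # write the new version
#     if ok:
#         remove_backuped_file('/tmp/settings.json')      # drop the .backup copy
#     else:
#         restore_and_remove('/tmp/settings.json')        # roll back from the .backup copy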
#------------------------------------------------------------------------------
def LowerPriority():
"""
Platform dependent method to lower the priority of the running process.
"""
try:
sys.getwindowsversion()
except:
isWindows = False
else:
isWindows = True
if isWindows:
# Based on:
# "Recipe 496767: Set Process Priority In Windows" on ActiveState
# http://code.activestate.com/recipes/496767/
import win32api
import win32process
import win32con
pid = win32api.GetCurrentProcessId()
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
win32process.SetPriorityClass(handle, win32process.BELOW_NORMAL_PRIORITY_CLASS)
else:
import os
os.nice(20)
def HigherPriority():
try:
sys.getwindowsversion()
except:
isWindows = False
else:
isWindows = True
if isWindows:
import win32api
import win32process
import win32con
pid = win32api.GetCurrentProcessId()
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
win32process.SetPriorityClass(handle, win32process.REALTIME_PRIORITY_CLASS)
else:
import os
os.nice(1)
#-------------------------------------------------------------------------------
def shortPath(path):
"""
    Get absolute 'short' path in Unicode, converting to 8.3 Windows filenames.
"""
path_ = os.path.abspath(path)
if not Windows():
return strng.to_text(path_)
if not os.path.exists(path_):
if os.path.isdir(os.path.dirname(path_)):
res = shortPath(os.path.dirname(path_))
return strng.to_text(os.path.join(res, os.path.basename(path_)))
return strng.to_text(path_)
try:
import win32api
spath = win32api.GetShortPathName(path_)
return strng.to_text(spath)
except:
lg.exc()
return strng.to_text(path_)
def longPath(path):
"""
    Get absolute 'long' path in Unicode, converting to the full path, even if
    it was in 8.3 format.
"""
path_ = os.path.abspath(path)
if not Windows():
return strng.to_text(path_)
if not os.path.exists(path_):
return strng.to_text(path_)
try:
import win32api
lpath = win32api.GetLongPathName(path_)
return strng.to_text(lpath)
except:
lg.exc()
return strng.to_text(path_)
# def portablePath(path):
# """
# For Windows changes all separators to Linux format:
# - "\\" -> "/"
# - "\" -> "/"
# If ``path`` is unicode convert to utf-8.
# """
# p = path
# if Windows():
# p = p.replace('\\\\', '/').replace('\\', '/')
# if isinstance(p, unicode):
# return p.encode('utf-8')
# return p
def remotePath(path):
"""
Simplify and clean "remote" path value.
"""
p = strng.to_text(path)
if p == u'' or p == u'/':
return p
p = p.lstrip(u'/').lstrip(u'\\')
if p.endswith(u'/') and len(p) > 1:
p = p.rstrip(u'/')
return p
def portablePath(path):
"""
Fix path to fit for our use:
- do convert to absolute path
- for Windows:
- change all separators to Linux format: "\\"->"/" and "\"=>"/"
- convert disk letter to lower case
- convert to unicode
"""
path = strng.to_text(path)
if path == u'' or path == u'/':
return path
if Windows() and len(path) == 2 and path[1] == u':':
# "C:" -> "C:/"
path += u'/'
if path.count(u'~'):
path = os.path.expanduser(path)
p = os.path.abspath(path)
if Windows():
p = p.replace(u'\\', u'/')
if len(p) >= 2:
if p[1] == u':':
p = p[0].lower() + p[1:]
elif p[:2] == u'//':
p = u'\\\\' + p[2:]
if p.endswith(u'/') and len(p) > 1:
p = p.rstrip(u'/')
return p
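# Illustrative sketch (not part of the original module) of what portablePath()
# is expected to do with Windows-style input (on Linux/Mac it mostly returns the
# absolute path as unicode text); the example paths are hypothetical.
#
#     portablePath(u'C:\\Users\\alice\\')   # -> roughly u'c:/Users/alice'
#     portablePath(u'~/bitdust')            # '~' is expanded via os.path.expanduser()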
def pathExist(localpath):
"""
My own "portable" version of built-in ``os.path.exist()`` method.
"""
if os.path.exists(localpath):
return True
p = portablePath(localpath)
if os.path.exists(p):
return True
if Windows() and pathIsNetworkLocation(localpath):
return True
return False
def pathIsDir(localpath):
"""
    Assume localpath exists and return True if it is a folder.
"""
if os.path.isdir(localpath):
return True
if os.path.exists(localpath) and os.path.isfile(localpath):
return False
# don't know... let's try portable path
p = portablePath(localpath)
if os.path.isdir(p):
return True
if os.path.exists(localpath) and os.path.isfile(p):
return False
if Windows() and pathIsNetworkLocation(localpath):
return True
    # maybe the path does not exist at all?
if not os.path.exists(localpath):
return False
if not os.path.exists(p):
return False
# ok, on Linux we have devices, mounts, links ...
if Linux():
try:
import stat
            st = os.stat(localpath)
return stat.S_ISDIR(st.st_mode)
except:
return False
# now we are in really big trouble
    raise Exception('Path does not exist: %s' % p)
return False
def pathIsDriveLetter(path):
"""
Return True if ``path`` is a Windows drive letter.
"""
p = path.rstrip('/').rstrip('\\')
if len(p) != 2:
return False
if p[1] != ':':
return False
if not p[0].isalpha():
return False
return True
def pathIsNetworkLocation(path):
"""
Return True if ``path`` is a Windows network location.
>>> pathIsNetworkLocation(r'\\remote_machine')
True
"""
p = path.rstrip('/').rstrip('\\')
if len(p) < 3:
return False
if not p.startswith('\\\\'):
return False
if p[2:].count('\\') or p[2:].count('/'):
return False
return True
#------------------------------------------------------------------------------
def main_is_frozen():
"""
Return True if BitDust is started from .exe not from sources.
http://www.py2exe.org/index.cgi/HowToDetermineIfRunningFromExe
"""
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") or # old py2exe
imp.is_frozen("__main__")) # tools/freeze
def isGUIpossible():
"""
"""
# if Windows():
# return True
# if Mac():
# return True
# if Linux():
# return X11_is_running()
# All the UI now will be created using Electron framework.
# To make it possible we need to run local API rest_http/json_rpc server.
# So here we always return False from now.
return False
def X11_is_running():
"""
Linux method to check if BitDust GUI is possible.
    http://stackoverflow.com/questions/1027894/detect-if-x11-is-available-python
"""
global X11isRunning
if not Linux():
return False
if X11isRunning is not None:
return X11isRunning
try:
from subprocess import Popen, PIPE
p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE)
p.communicate()
result = p.returncode == 0
except:
result = False
X11isRunning = result
return X11isRunning
#------------------------------------------------------------------------------
def getExecutableDir():
"""
A smart way to detect the path of executable folder.
"""
if main_is_frozen():
path = os.path.dirname(os.path.abspath(sys.executable))
else:
try:
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
except:
path = os.path.dirname(os.path.abspath(sys.argv[0]))
return strng.to_text(path)
def getExecutableFilename():
"""
A smart way to detect executable file name.
"""
if main_is_frozen():
path = os.path.abspath(sys.executable)
else:
path = os.path.abspath(sys.argv[0])
return strng.to_text(path)
def getUserName():
"""
Return current user name in unicode string.
"""
try:
import pwd
except ImportError:
try:
import getpass
except:
pass
pwd = None
try:
if pwd:
return pwd.getpwuid(os.geteuid()).pw_name
else:
return getpass.getuser()
except:
pass
return os.path.basename(strng.to_text(os.path.expanduser('~')))
#------------------------------------------------------------------------------
def listHomeDirLinux():
"""
    Just return a list of folders in the user home dir.
"""
if Windows():
return []
rootlist = []
homedir = os.path.expanduser('~')
for dirname in os.listdir(homedir):
if os.path.isdir(os.path.join(homedir, dirname)):
rootlist.append(dirname)
return rootlist
def listLocalDrivesWindows():
"""
Return a list of drive letters under Windows.
This list should include only "fixed", "writable" and "real" drives,
not include cd drives, network drives, USB drives, etc.
"""
if not Windows():
return []
rootlist = []
try:
import win32api
import win32file
drives = (drive for drive in win32api.GetLogicalDriveStrings().split("\000") if drive)
for drive in drives:
if win32file.GetDriveType(drive) == 3:
rootlist.append(drive)
except:
lg.exc()
return rootlist
def listRemovableDrivesWindows():
"""
Return a list of "removable" drives under Windows.
"""
l = []
try:
import win32file
drivebits = win32file.GetLogicalDrives()
for d in range(1, 26):
mask = 1 << d
if drivebits & mask:
# here if the drive is at least there
drname = '%c:\\' % chr(ord('A') + d)
t = win32file.GetDriveType(drname)
if t == win32file.DRIVE_REMOVABLE:
l.append(drname)
except:
lg.exc()
return l
def listRemovableDrivesLinux():
"""
Return a list of "removable" drives under Linux.
    The same idea as ``listRemovableDrivesWindows``.
"""
try:
return [os.path.join('/media', x) for x in os.listdir('/media')]
except:
return []
def listRemovableDrives():
"""
Platform-independent way to get a list of "removable" locations.
Used to detect the location to write a copy of Private Key. Should
detect USB flash drives.
"""
if Linux():
return listRemovableDrivesLinux()
elif Windows():
return listRemovableDrivesWindows()
return []
def listMountPointsLinux():
"""
Return a list of mount points under Linux.
Used to detect locations for donated space and local backups.
"""
mounts = os.popen('mount')
result = []
if Linux():
mch = re.compile('^(.+?) on (.+?) type .+?$')
else: # Mac
mch = re.compile('^(.+?) on (.+?).*?$')
# mo = re.match('^(.+?) on (.+?) type .+?$', line)
for line in mounts.readlines():
mo = mch.match(line)
if mo:
device = mo.group(1)
mount_point = mo.group(2)
if device.startswith('/dev/'):
result.append(mount_point)
return result
def getMountPointLinux(path):
"""
Return mount point for given path.
"""
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
#-------------------------------------------------------------------------------
def find_process(applist):
"""
A portable method to search executed processes.
You can provide a name or regexp to scan.
"""
try:
import psutil
pidsL = []
for p in psutil.process_iter():
try:
p_pid = p.pid
p_cmdline = p.cmdline()
except:
continue
if p_pid == os.getpid():
continue
for app in applist:
try:
cmdline = ' '.join(p_cmdline)
except:
continue
if app.startswith('regexp:'):
if re.match(app[7:], cmdline) is not None:
pidsL.append(p_pid)
else:
if cmdline.count(app):
pidsL.append(p_pid)
if pidsL:
return pidsL
except:
pass
ostype = platform.uname()[0]
if ostype == "Windows":
return find_process_win32(applist)
else:
return find_process_linux(applist)
return []
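# Illustrative sketch (not part of the original module): find_process() accepts
# plain substrings or patterns prefixed with 'regexp:' (compare find_main_process()
# further below); the process names here are hypothetical.
#
#     pids = find_process(['bitdustnode.exe', 'regexp:^.*python.*bitdust.py$'])
#     for found_pid in pids:
#         kill_process(found_pid)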
def kill_process(pid):
"""
Call to OS ``kill`` procedure. Portable.
``pid`` - process id.
"""
ostype = platform.uname()[0]
if ostype == "Windows":
kill_process_win32(pid)
else:
kill_process_linux(pid)
def list_processes_linux():
"""
This function will return an iterator with the process pid/cmdline tuple.
:return: pid, cmdline tuple via iterator
:rtype: iterator
>>> for procs in list_processes_linux():
>>> print procs
('5593', '/usr/lib/mozilla/kmozillahelper')
('6353', 'pickup -l -t fifo -u')
('6640', 'kdeinit4: konsole [kdeinit]')
('6643', '/bin/bash')
('7451', '/usr/bin/python /usr/bin/ipython')
"""
for pid_path in glob.glob('/proc/[0-9]*'):
try:
            # cmdline represents the command with which the process was started
f = open("%s/cmdline" % pid_path)
pid = pid_path.split("/")[2] # get the PID
# we replace the \x00 to spaces to make a prettier output from kernel
cmdline = f.read().replace("\x00", " ").rstrip()
f.close()
yield (pid, cmdline)
except:
pass
def find_process_linux(applist):
"""
    You can look for some process name, give a list of keywords or regexp
    strings to search.
This is for Linux.
"""
pidsL = []
for pid, cmdline in list_processes_linux():
try:
pid = int(pid)
except:
continue
if pid == os.getpid():
continue
for app in applist:
if app.startswith('regexp:'):
if re.match(app[7:], cmdline) is not None:
pidsL.append(pid)
else:
if cmdline.find(app) > -1:
pidsL.append(pid)
return pidsL
def find_process_win32(applist):
"""
Search for process name, for MS Windows.
"""
pidsL = []
try:
import win32com.client
objWMI = win32com.client.GetObject("winmgmts:\\\\.\\root\\CIMV2")
colProcs = objWMI.ExecQuery("SELECT * FROM Win32_Process")
for Item in colProcs:
pid = int(Item.ProcessId)
if pid == os.getpid():
continue
cmdline = Item.Caption.lower()
if Item.CommandLine:
cmdline += Item.CommandLine.lower()
for app in applist:
if app.startswith('regexp:'):
if re.match(app[7:], cmdline) is not None:
pidsL.append(pid)
else:
if cmdline.find(app) > -1:
pidsL.append(pid)
except:
lg.exc()
return pidsL
def kill_process_linux(pid):
"""
Make a call to system ``kill`` command.
"""
try:
import signal
os.kill(pid, signal.SIGTERM)
except:
lg.exc()
def kill_process_win32(pid):
"""
Call to system Windows API ``TerminateProcess`` method.
"""
try:
from win32api import TerminateProcess, OpenProcess, CloseHandle
except:
lg.exc()
return False
try:
PROCESS_TERMINATE = 1
handle = OpenProcess(PROCESS_TERMINATE, False, pid)
except:
lg.out(2, 'bpio.kill_process_win32 can not open process %d' % pid)
return False
try:
TerminateProcess(handle, -1)
except:
lg.out(2, 'bpio.kill_process_win32 can not terminate process %d' % pid)
return False
try:
CloseHandle(handle)
except:
lg.exc()
return False
return True
def find_main_process(pid_file_path=None):
"""
"""
appList = find_process([
'bitdustnode.exe',
'BitDustNode.exe',
'regexp:^.*python.*bitdust.py$',
])
if not appList:
return []
try:
if not pid_file_path:
from main import settings
pid_file_path = os.path.join(settings.MetaDataDir(), 'processid')
processid = int(ReadTextFile(pid_file_path))
except:
processid = None
if not processid:
return appList
if processid not in appList:
return []
return [processid, ]
#------------------------------------------------------------------------------
def detect_number_of_cpu_cores():
"""
Detects the number of effective CPUs in the system.
"""
# for Linux, Unix and MacOS
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
#Linux and Unix
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else:
# MacOS X
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# for Windows
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
# return the default value
return 1
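# Illustrative usage sketch (not part of the original module):
# detect_number_of_cpu_cores() always returns at least 1, so it can be used
# directly to size a worker pool.
#
#     workers = max(1, detect_number_of_cpu_cores() - 1)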
| agpl-3.0 | 287,642,250,189,792,000 | 26.351867 | 96 | 0.544895 | false |
klmitch/dtest | tests/test_alternate.py | 1 | 1428 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dtest import *
from dtest.util import *
# Ensure that the alternate setUp/tearDown decorators work
class TestAlternate(DTestCase):
alternate = None
def setUp(self):
assert_is_none(self.alternate)
self.alternate = False
def tearDown(self):
assert_false(self.alternate)
# Should use the default setUp/tearDown
def test1(self):
assert_false(self.alternate)
# Have to use @istest here to make the decorators available
@istest
def test2(self):
assert_true(self.alternate)
# Alternate setUp/tearDown for test2
@test2.setUp
def alternateSetUp(self):
assert_is_none(self.alternate)
self.alternate = True
@test2.tearDown
def alternateTearDown(self):
assert_true(self.alternate)
| apache-2.0 | -5,790,726,676,895,818,000 | 28.75 | 78 | 0.696078 | false |
dbiesecke/dbiesecke.github.io | repo/script.video.F4mProxy/lib/TSDownloader.py | 1 | 34131 |
import xml.etree.ElementTree as etree
import base64
from struct import unpack, pack
import sys
import io
import os
import time
import itertools
import xbmcaddon
import xbmc
import urllib2,urllib
import traceback
import urlparse
import posixpath
import re
import socket, struct
from flvlib import tags
from flvlib import helpers
from flvlib.astypes import MalformedFLV
import zlib
from StringIO import StringIO
import hmac
import hashlib
import base64
import bitstring
addon_id = 'script.video.F4mProxy'
selfAddon = xbmcaddon.Addon(id=addon_id)
__addonname__ = selfAddon.getAddonInfo('name')
__icon__ = selfAddon.getAddonInfo('icon')
downloadPath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))#selfAddon["profile"])
#F4Mversion=''
defualtype=""
def getLastPTS(data,rpid,type="video"):
##print 'inpcr'
ret=None
currentpost=len(data)
##print 'currentpost',currentpost
found=False
packsize=188
spoint=0
while not found:
ff=data.rfind('\x47',0,currentpost-1)
##print 'ff',ff,data[ff-188]
if ff==-1:
#print 'No sync data'
found=True
elif data[ff-packsize]=='\x47' and data[ff-packsize-packsize]=='\x47':
spoint=ff
found=True
else:
currentpost=ff-1
##print 'spoint',spoint
if spoint<=0: return None
currentpost= spoint
found=False
while not found:
##print len(data)-currentpost
if len(data)-currentpost>=188:
##print 'currentpost',currentpost
bytes=data[currentpost:currentpost+188]
bits=bitstring.ConstBitStream(bytes=bytes)
sign=bits.read(8).uint
tei = bits.read(1).uint
pusi = bits.read(1).uint
transportpri = bits.read(1).uint
pid = bits.read(13).uint
##print pid
if pid==rpid or rpid==0:
##print pid
##print 1/0
try:
packet = bits.read((packsize-3)*8)
scramblecontrol = packet.read(2).uint
adapt = packet.read(2).uint
concounter = packet.read(4).uint
except:
#print 'error'
return None##print 'errpor'#adapt=-1
decodedpts=None
av=""
##print 'adapt',adapt
if adapt == 3:
adaptation_size = packet.read(8).uint
discontinuity = packet.read(1).uint
random = packet.read(1).uint
espriority = packet.read(1).uint
pcrpresent = packet.read(1).uint
opcrpresent = packet.read(1).uint
splicingpoint = packet.read(1).uint
transportprivate = packet.read(1).uint
adaptation_ext = packet.read(1).uint
restofadapt = (adaptation_size+3) - 1
if pcrpresent == 1:
pcr = packet.read(48)
restofadapt -= 6
if opcrpresent == 1:
opcr = packet.read(48)
restofadapt -= 6
packet.pos += (restofadapt-3) * 8
if ((packet.len - packet.pos)/8) > 5:
pesync = packet.read(24)#.hex
if pesync == ('0x000001'):
pestype = packet.read(8).uint
if pestype > 223 and pestype < 240:
av = 'video'
if pestype < 223 and pestype > 191:
av = 'audio'
packet.pos += (3*8)
ptspresent = packet.read(1).uint
dtspresent = packet.read(1).uint
if ptspresent:
packet.pos += (14)
pts = packet.read(40)
pts.pos = 4
firstpartpts = pts.read(3)
pts.pos += 1
secondpartpts = pts.read(15)
pts.pos += 1
thirdpartpts = pts.read(15)
#decodedpts = bitstring.ConstBitArray().join([firstpartpts.bin, secondpartpts.bin, thirdpartpts.bin]).uint
decodedpts =int(''.join([firstpartpts.bin, secondpartpts.bin, thirdpartpts.bin]),2)#
if dtspresent:
dts = packet.read(40)
dts.pos = 4
firstpartdts = dts.read(3)
dts.pos += 1
secondpartdts = dts.read(15)
dts.pos += 1
thirdpartdts = dts.read(15)
#decodeddts = bitstring.ConstBitArray().join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]).uint
decodeddts =int(''.join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]),2)#
elif adapt == 2:
#if adapt is 2 the packet is only an adaptation field
adaptation_size = packet.read(8).uint
discontinuity = packet.read(1).uint
random = packet.read(1).uint
espriority = packet.read(1).uint
pcrpresent = packet.read(1).uint
opcrpresent = packet.read(1).uint
splicingpoint = packet.read(1).uint
transportprivate = packet.read(1).uint
adaptation_ext = packet.read(1).uint
restofadapt = (adaptation_size+3) - 1
if pcrpresent == 1:
pcr = packet.read(48)
restofadapt -= 6
if opcrpresent == 1:
opcr = packet.read(48)
restofadapt -= 6
elif adapt == 1:
pesync = packet.read(24)#.hex
##print 'pesync',pesync
if pesync == ('0x000001'):
pestype = packet.read(8).uint
if pestype > 223 and pestype < 240:
av = 'video'
if pestype < 223 and pestype > 191:
av = 'audio'
packet.pos += 24
ptspresent = packet.read(1).uint
dtspresent = packet.read(1).uint
##print 'ptspresent',ptspresent
if ptspresent:
packet.pos += (14)
pts = packet.read(40)
pts.pos = 4
firstpartpts = pts.read(3)
pts.pos += 1
secondpartpts = pts.read(15)
pts.pos += 1
thirdpartpts = pts.read(15)
#decodedpts = bitstring.ConstBitArray().join([firstpartpts.bin, secondpartpts.bin, thirdpartpts.bin]).uint
decodedpts =int(''.join([firstpartpts.bin, secondpartpts.bin, thirdpartpts.bin]),2)#
if dtspresent:
dts = packet.read(40)
dts.pos = 4
firstpartdts = dts.read(3)
dts.pos += 1
secondpartdts = dts.read(15)
dts.pos += 1
thirdpartdts = dts.read(15)
#decodeddts = bitstring.ConstBitArray().join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]).uint
decodeddts =int(''.join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]),2)#
if decodedpts and (type=="" or av==type) and len(av)>0:
##print 'currentpost',currentpost,decodedpts
return decodedpts
currentpost=currentpost-packsize
if currentpost<10:
#print 'came back to begin'
found=True
return ret
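# Illustrative note (not part of the original module): in an MPEG-TS PES header
# the 33-bit PTS/DTS value is spread over 5 bytes as 3 + 15 + 15 bits with marker
# bits in between, which is why getLastPTS() and getFirstPTSFrom() read three
# parts and join their binary strings before converting to an integer. PTS values
# are in 90 kHz units, so a difference is converted to seconds by dividing by
# 90000, as done in TSDownloader.downloadInternal() further below.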
def getFirstPTSFrom(data,rpid, initpts,type="video" ):
##print 'xxxxxxxxxxxinpcr getFirstPTSFrom'
ret=None
currentpost=0#len(data)
##print 'currentpost',currentpost
found=False
packsize=188
spoint=0
##print 'inwhile'
while not found:
ff=data.find('\x47',currentpost)
if ff==-1:
#print 'No sync data'
found=True
elif data[ff+packsize]=='\x47' and data[ff+packsize+packsize]=='\x47':
spoint=ff
found=True
else:
currentpost=ff+1
##print 'spoint',spoint
if spoint>len(data)-packsize: return None
currentpost= spoint
found=False
while not found:
##print 'currentpost',currentpost
if len(data)-currentpost>=188:
bytes=data[currentpost:currentpost+188]
bits=bitstring.ConstBitStream(bytes=bytes)
sign=bits.read(8).uint
tei = bits.read(1).uint
pusi = bits.read(1).uint
transportpri = bits.read(1).uint
pid = bits.read(13).uint
##print pid
##print pid,rpid
##print 1/0
if rpid==pid or rpid==0:
##print 'here pid is same'
try:
packet = bits.read((packsize-3)*8)
scramblecontrol = packet.read(2).uint
adapt = packet.read(2).uint
concounter = packet.read(4).uint
except:
#print 'error'
return None##print 'errpor'#adapt=-1
decodedpts=None
av=""
if adapt == 3:
adaptation_size = packet.read(8).uint
discontinuity = packet.read(1).uint
random = packet.read(1).uint
espriority = packet.read(1).uint
pcrpresent = packet.read(1).uint
opcrpresent = packet.read(1).uint
splicingpoint = packet.read(1).uint
transportprivate = packet.read(1).uint
adaptation_ext = packet.read(1).uint
restofadapt = (adaptation_size+3) - 1
if pcrpresent == 1:
pcr = packet.read(48)
restofadapt -= 6
if opcrpresent == 1:
opcr = packet.read(48)
restofadapt -= 6
packet.pos += (restofadapt-3) * 8
if ((packet.len - packet.pos)/8) > 5:
pesync = packet.read(24)#.hex
if pesync == ('0x000001'):
pestype = packet.read(8).uint
if pestype > 223 and pestype < 240:
av = 'video'
if pestype < 223 and pestype > 191:
av = 'audio'
packet.pos += (3*8)
ptspresent = packet.read(1).uint
dtspresent = packet.read(1).uint
if ptspresent:
packet.pos += (14)
pts = packet.read(40)
pts.pos = 4
firstpartpts = pts.read(3)
pts.pos += 1
secondpartpts = pts.read(15)
pts.pos += 1
thirdpartpts = pts.read(15)
#decodedpts = bitstring.ConstBitArray().join([firstpartpts.bin, secondpartpts.bin, thirdpartpts.bin]).uint
decodedpts =int(''.join([firstpartpts.bin, secondpartpts.bin, thirdpartpts.bin]),2)#
if dtspresent:
dts = packet.read(40)
dts.pos = 4
firstpartdts = dts.read(3)
dts.pos += 1
secondpartdts = dts.read(15)
dts.pos += 1
thirdpartdts = dts.read(15)
#decodeddts = bitstring.ConstBitArray().join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]).uint
decodeddts =int(''.join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]),2)#
elif adapt == 2:
#if adapt is 2 the packet is only an adaptation field
adaptation_size = packet.read(8).uint
discontinuity = packet.read(1).uint
random = packet.read(1).uint
espriority = packet.read(1).uint
pcrpresent = packet.read(1).uint
opcrpresent = packet.read(1).uint
splicingpoint = packet.read(1).uint
transportprivate = packet.read(1).uint
adaptation_ext = packet.read(1).uint
restofadapt = (adaptation_size+3) - 1
if pcrpresent == 1:
pcr = packet.read(48)
restofadapt -= 6
if opcrpresent == 1:
opcr = packet.read(48)
restofadapt -= 6
elif adapt == 1:
pesync = packet.read(24)#.hex
##print 'pesync',pesync
if pesync == ('0x000001'):
pestype = packet.read(8).uint
if pestype > 223 and pestype < 240:
av = 'video'
if pestype < 223 and pestype > 191:
av = 'audio'
packet.pos += 24
ptspresent = packet.read(1).uint
dtspresent = packet.read(1).uint
##print 'ptspresent',ptspresent
if ptspresent:
packet.pos += (14)
pts = packet.read(40)
pts.pos = 4
firstpartpts = pts.read(3)
pts.pos += 1
secondpartpts = pts.read(15)
pts.pos += 1
thirdpartpts = pts.read(15)
#decodedpts = bitstring.ConstBitArray().join([firstpartpts.bin, secondpartpts.bin, thirdpartpts.bin]).uint
decodedpts =int(''.join([firstpartpts.bin, secondpartpts.bin, thirdpartpts.bin]),2)#
if dtspresent:
dts = packet.read(40)
dts.pos = 4
firstpartdts = dts.read(3)
dts.pos += 1
secondpartdts = dts.read(15)
dts.pos += 1
thirdpartdts = dts.read(15)
#decodeddts = bitstring.ConstBitArray().join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]).uint
decodeddts =int(''.join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]),2)#
if decodedpts and (type=="" or av==type) and len(av)>0:
##print decodedpts
if decodedpts>initpts:
return decodedpts,currentpost
else:
found=True
currentpost=currentpost+188
if currentpost>=len(data):
##print 'came back to begin'
found=True
return ret
class TSDownloader():
outputfile =''
clientHeader=None
def __init__(self):
self.init_done=False
def thisme(self):
return 'aaaa'
def openUrl(self,url, ischunkDownloading=False):
try:
post=None
openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
if post:
req = urllib2.Request(url, post)
else:
req = urllib2.Request(url)
ua_header=False
if self.clientHeader:
for n,v in self.clientHeader:
req.add_header(n,v)
if n=='User-Agent':
ua_header=True
if not ua_header:
req.add_header('User-Agent','VLC/2.2.2 LibVLC/2.2.17')
req.add_header('Icy-MetaData','1')
#response = urllib2.urlopen(req)
if self.proxy:
req.set_proxy(self.proxy, 'http')
response = openner.open(req)
return response
except:
#print 'Error in getUrl'
traceback.print_exc()
return None
def getUrl(self,url, ischunkDownloading=False):
try:
post=None
openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
if post:
req = urllib2.Request(url, post)
else:
req = urllib2.Request(url)
ua_header=False
if self.clientHeader:
for n,v in self.clientHeader:
req.add_header(n,v)
if n=='User-Agent':
ua_header=True
if not ua_header:
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
#response = urllib2.urlopen(req)
if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ):
req.set_proxy(self.proxy, 'http')
response = openner.open(req)
data=response.read()
return data
except:
#print 'Error in getUrl'
traceback.print_exc()
return None
def init(self, out_stream, url, proxy=None,g_stopEvent=None, maxbitRate=0):
try:
self.init_done=False
self.init_url=url
self.clientHeader=None
self.status='init'
self.proxy = proxy
self.maxbitRate=maxbitRate
if self.proxy and len(self.proxy)==0:
self.proxy=None
self.out_stream=out_stream
if g_stopEvent: g_stopEvent.clear()
self.g_stopEvent=g_stopEvent
if '|' in url:
sp = url.split('|')
url = sp[0]
self.clientHeader = sp[1]
self.clientHeader= urlparse.parse_qsl(self.clientHeader)
#print 'header recieved now url and headers are',url, self.clientHeader
self.status='init done'
self.url=url
return True #disable for time being
#return self.downloadInternal(testurl=True)
#os.remove(self.outputfile)
except:
traceback.print_exc()
self.status='finished'
return False
def keep_sending_video(self,dest_stream, segmentToStart=None, totalSegmentToSend=0):
try:
self.status='download Starting'
self.downloadInternal(dest_stream=dest_stream)
except:
traceback.print_exc()
self.status='finished'
def downloadInternal(self,dest_stream=None,testurl=False):
try:
url=self.url
fileout=dest_stream
self.status='bootstrap done'
First=True
cont=True
lastbuf=None
lost=1
ignorefind=0
lastpts=None
fixpid=256
ignoredblock=None
sleeptime=0
firsttimeurl=False
while True:
if sleeptime>0:
xbmc.sleep(sleeptime)
sleeptime=0
starttime=time.time()
response=self.openUrl(url)
buf="start"
byteread=0
bytesent=0
firstBlock=True
wrotesomething=False
currentduration=0
limit=1024*188
if testurl: limit=1024
lastdataread=limit
#print 'starting.............. new url',wrotesomething
try:
if self.g_stopEvent and self.g_stopEvent.isSet():
print 'event set'
return False
while (buf != None and len(buf) > 0 and lastdataread>0):
if self.g_stopEvent and self.g_stopEvent.isSet():
print 'event set'
return False
try:
buf = response.read(limit)##500 * 1024)
lastdataread=len(buf)
byteread+=lastdataread
#print 'got data',len(buf)
if lastdataread==0: print 1/0
if testurl:
print 'test complete true'
response.close()
return True
except:
traceback.print_exc(file=sys.stdout)
print 'testurl',testurl,lost
if testurl and lost>10:
print 'test complete false'
response.close()
return False
buf=None
lost+=1
if lost>10 or firsttimeurl:
                                fileout.close()
return
break
firsttimeurl=False
writebuf=buf
if not First:
##print 'second ite',wrotesomething
if wrotesomething==False:
##print 'second ite wrote something false'#, len(lastbuf)
if lastpts:
#buffertofind=lastbuf#[lastbuf.rfind('G',len(lastbuf)-170):]
##print 'buffertofind',len(buffertofind),buffertofind.encode("hex")
#print 'pts to find',lastpts
lastforcurrent=getLastPTS(buf,fixpid,defualtype)
#print 'last pts in new data',lastforcurrent
if lastpts<lastforcurrent:#we have data
#print 'we have data', lastpts,lastforcurrent, (lastforcurrent-lastpts)/90000
try:
firstpts,pos= getFirstPTSFrom(buf,fixpid,lastpts,defualtype)#
except:
traceback.print_exc(file=sys.stdout)
print 'getFirstPTSFrom error, using, last -1',# buf.encode("hex"), lastpts,
firstpts,pos= getFirstPTSFrom(buf,fixpid,lastpts-1,defualtype)#
#if ignoredblock and (lastpts-firstpts)<0:
# print 'ignored last block yet the new block loosing data'
# print lastpts,firstpts,lastpts-firstpts
# print ignoredblock.encode('hex')
# print buf.encode('hex')
#print 'last pst send',lastpts,
#print 'first pst new',firstpts
#if abs(lastpts-firstpts)>300000:
# print 'xxxxxxxxxxxxxxxxxx',buf.encode("hex")
#print 'last pst new',lastforcurrent
if firstpts>lastforcurrent:
print 'bad pts? ignore'#, buf.encode("hex")
#print 'auto pos',pos
if pos==None: pos=0
if pos>5000:
rawpos=buf.find(lastbuf[-5000:])
if rawpos>=0:
pos=rawpos+5000
#print 'overridin 1'
else:
#print 'rawpos',rawpos,lastbuf[-5000:].encode("hex")
#print 'buff',buf.encode("hex")
rawpos=(ignoredblock+buf).find((lastbuf)[-5000:])
if rawpos>len(ignoredblock):
pos=rawpos-len(ignoredblock)
#print 'overridin 2'
#else:
# print 'using next PTS', pos, firstpts
ignoredblock=None
#else: pos=0
#print firstpts,pos,(firstpts-lastpts)/90000
#fn=buf.find(buffertofind[:188])
#print 'BUFFER FOUND!!', (pos*100)/len(buf)
if (pos*100)/len(buf)>70:
sleeptime=0
buf= buf[pos:]
lastpts=lastforcurrent
#print 'now last pts',lastpts
wrotesomething=True
else:
#if lastforcurrent==None:
# print 'NONE ISSUE', buf.encode("hex")
print 'problembytes','diff',lastpts,lastforcurrent, lastpts, lastforcurrent
#buf.encode("hex")
ignoredblock=writebuf
ignorefind+=1#same or old data?
writebuf=None
#if lastpts-lastforcurrent>(90000*10):
#lastdataread=0 # read again we are buffering
#response.close()
#xbmc.sleep(1000)
# print 'reconnect'
#if ignorefind>5:
# ignorefind=0
# #print 'not ignoring so write data'
#else:
# #print 'ignoring at the m'
# writebuf=None
#print 'Buffer NOT FOUND!!ignoring'
#else:
# writebuf=None
##print 'second ite wrote something false skipiing'
#else:
##print 'second ite wrote something so continue'
else:
#print 'found first packet', len(writebuf)
First=False
if not ('\x47' in writebuf[0:20]):
#fileout.write(buf)
#fileout.flush()
print 'file not TS', repr(writebuf[:100])
fileout.close()
return
starttime=time.time()
if writebuf and len(writebuf)>0:
wrotesomething=True
if len(buf)>5000 or lastbuf==None:
lastbuf=buf
else:
lastbuf+=buf
bytesent+=len(buf)
fileout.write(buf)
##print 'writing something..............'
fileout.flush()
lastpts1=getLastPTS(lastbuf,fixpid,defualtype)
if lastpts and lastpts1 and lastpts1-lastpts<0:
print 'too small?',lastpts , lastpts1,lastpts1-lastpts
#print lastbuf.encode("hex")
if not lastpts1==None: lastpts=lastpts1
try:
firsttime,pos=getFirstPTSFrom(lastbuf,fixpid,0,defualtype)#
#print lastpts,firsttime
currentduration += (lastpts-firsttime)/90000
##print 'currentduration',currentduration
#currentduration-=2
#f currentduration<=2:
# currentduration=0
#if currentduration>10: currentduration=2
##print 'sleeping for',currentduration
except: pass
try:
print 'finished',byteread
if byteread>0:
print 'Percent Used'+str(((bytesent*100)/byteread))
response.close()
print 'response closed'
except:
print 'close error'
traceback.print_exc(file=sys.stdout)
if wrotesomething==False :
if lost<10: continue
fileout.close()
#print time.asctime(), "Closing connection"
return
else:
lost=0
if lost<0: lost=0
#xbmc.sleep(len(buf)*1000/1024/200)
#print 'finish writing',len(lastbuf)
##print lastbuf[-188:].encode("hex")
endtime=time.time()
timetaken=int((endtime-starttime))
#print 'video time',currentduration
#print 'processing time',timetaken
sleeptime=currentduration-timetaken-2
#print 'sleep time',sleeptime
#if sleeptime>0:
# xbmc.sleep(sleeptime*1000)#len(buf)/1024/1024*5000)
except socket.error, e:
print time.asctime(), "Client Closed the connection."
try:
response.close()
fileout.close()
except Exception, e:
return
return
except Exception, e:
traceback.print_exc(file=sys.stdout)
response.close()
fileout.close()
return False
except:
traceback.print_exc()
return
| mit | -3,784,030,089,646,129,700 | 44.627049 | 156 | 0.398875 | false |
hydroshare/hydroshare | hs_tracking/models.py | 1 | 11384 | from datetime import datetime, timedelta
from django.db import models
from django.db.models import F
from django.core import signing
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth.models import User
from theme.models import UserProfile
from .utils import get_std_log_fields
from hs_core.models import BaseResource
from hs_core.hydroshare import get_resource_by_shortkey
SESSION_TIMEOUT = settings.TRACKING_SESSION_TIMEOUT
PROFILE_FIELDS = settings.TRACKING_PROFILE_FIELDS
USER_FIELDS = settings.TRACKING_USER_FIELDS
VISITOR_FIELDS = ["id"] + USER_FIELDS + PROFILE_FIELDS
if set(PROFILE_FIELDS) & set(USER_FIELDS):
raise ImproperlyConfigured("hs_tracking PROFILE_FIELDS and USER_FIELDS must not contain"
" overlapping field names")
class SessionManager(models.Manager):
def for_request(self, request, user=None):
if hasattr(request, 'user'):
user = request.user
signed_id = request.session.get('hs_tracking_id')
if signed_id:
tracking_id = signing.loads(signed_id)
cut_off = datetime.now() - timedelta(seconds=SESSION_TIMEOUT)
session = None
try:
session = Session.objects.filter(
variable__timestamp__gte=cut_off).filter(id=tracking_id['id']).first()
except Session.DoesNotExist:
pass
if session is not None and user is not None:
if session.visitor.user is None and user.is_authenticated():
try:
session.visitor = Visitor.objects.get(user=user)
session.save()
except Visitor.DoesNotExist:
session.visitor.user = user
session.visitor.save()
return session
# No session found, create one
if user.is_authenticated():
visitor, _ = Visitor.objects.get_or_create(user=user)
else:
visitor = Visitor.objects.create()
session = Session.objects.create(visitor=visitor)
# get standard fields and format
fields = get_std_log_fields(request, session)
msg = Variable.format_kwargs(**fields)
session.record('begin_session', msg)
request.session['hs_tracking_id'] = signing.dumps({'id': session.id})
return session
class Visitor(models.Model):
first_seen = models.DateTimeField(auto_now_add=True)
user = models.OneToOneField(settings.AUTH_USER_MODEL, null=True,
on_delete=models.SET_NULL,
related_name='visitor')
def export_visitor_information(self):
"""Exports visitor profile information."""
info = {
"id": self.id,
}
if self.user:
profile = UserProfile.objects.get(user=self.user)
for field in PROFILE_FIELDS:
info[field] = getattr(profile, field)
for field in USER_FIELDS:
info[field] = getattr(self.user, field)
else:
profile = None
for field in PROFILE_FIELDS:
info[field] = None
for field in USER_FIELDS:
info[field] = None
return info
class Session(models.Model):
begin = models.DateTimeField(auto_now_add=True)
visitor = models.ForeignKey(Visitor, related_name='session')
# TODO: hostname = models.CharField(null=True, default=None, max_length=256)
objects = SessionManager()
def get(self, name):
return Variable.objects.filter(session=self, name=name).first().get_value()
def getlist(self, name):
return [v.get_value() for v in Variable.objects.filter(session=self, name=name)]
def record(self, *args, **kwargs):
args = (self,) + args
return Variable.record(*args, **kwargs)
class Variable(models.Model):
TYPES = (
('Integer', int),
('Floating Point', float),
('Text', str),
('Flag', bool),
('None', lambda o: None)
)
TYPE_CHOICES = [
(i, label)
for (i, label) in
enumerate(label for (label, coercer) in TYPES)
]
from hs_core.models import BaseResource
session = models.ForeignKey(Session, related_name='variable')
timestamp = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=32)
type = models.IntegerField(choices=TYPE_CHOICES)
# change value to TextField to be less restrictive as max_length of CharField has been
# exceeded a couple of times
value = models.TextField()
# If a resource no longer exists, last_resource_id remains valid but resource is NULL
resource = models.ForeignKey(BaseResource, null=True,
related_name='variable',
on_delete=models.SET_NULL)
last_resource_id = models.CharField(null=True, max_length=32)
# flags describe kind of visit. False for non-visits
landing = models.BooleanField(null=False, default=False)
rest = models.BooleanField(null=False, default=False)
# REDUNDANT: internal = models.BooleanField(null=False, default=False)
def get_value(self):
v = self.value
if self.type == 3: # boolean types don't coerce reflexively
if v == 'true':
return True
else:
return False
else:
t = self.TYPES[self.type][1]
return t(v)
@classmethod
def format_kwargs(cls, **kwargs):
msg_items = []
for k, v in list(kwargs.items()):
msg_items.append('%s=%s' % (str(k), str(v)))
return '|'.join(msg_items)
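    # Illustrative sketch (not part of the original model): format_kwargs()
    # joins keyword arguments into one pipe-delimited message string, e.g.
    # Variable.format_kwargs(user='alice', landing=True) could produce
    # 'user=alice|landing=True' (the key order is not guaranteed).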
@classmethod
def record(cls, session, name, value=None, resource=None, resource_id=None,
rest=False, landing=False):
if resource is None and resource_id is not None:
try:
resource = get_resource_by_shortkey(resource_id, or_404=False)
except BaseResource.DoesNotExist:
resource = None
return Variable.objects.create(session=session, name=name,
type=cls.encode_type(value),
value=cls.encode(value),
last_resource_id=resource_id,
resource=resource,
rest=rest,
landing=landing)
@classmethod
def encode(cls, value):
if value is None:
return ''
elif isinstance(value, bool):
return 'true' if value else 'false' # only empty strings are False
elif isinstance(value, (int, float, str)):
return str(value)
else:
raise ValueError("Unknown type (%s) for tracking variable: %r",
type(value).__name__, value)
@classmethod
def encode_type(cls, value):
if value is None:
return 4
elif isinstance(value, bool):
return 3
elif isinstance(value, str):
return 2
elif isinstance(value, float):
return 1
elif isinstance(value, int):
return 0
else:
raise TypeError("Unable to record variable of unrecognized type %s",
type(value).__name__)
@classmethod
def recent_resources(cls, user, n_resources=5, days=60):
"""
fetch the most recent n resources with which a specific user has interacted
:param user: The user to document.
:param n_resources: the number of resources to return.
:param days: the number of days to scan.
The reason for the parameter `days` is that the runtime of this method
is very dependent upon the days that one scans. Thus, there is a tradeoff
between reporting history and timely responsiveness of the dashboard.
"""
# TODO: document actions like labeling and commenting (currently these are 'visit's)
return BaseResource.objects.filter(
variable__session__visitor__user=user,
variable__timestamp__gte=(datetime.now()-timedelta(days)),
variable__resource__isnull=False,
variable__name='visit')\
.only('short_id', 'created')\
.distinct()\
.annotate(public=F('raccess__public'),
discoverable=F('raccess__discoverable'),
published=F('raccess__published'),
last_accessed=models.Max('variable__timestamp'))\
.filter(variable__timestamp=F('last_accessed'))\
.order_by('-last_accessed')[:n_resources]
@classmethod
def popular_resources(cls, n_resources=5, days=60, today=None):
"""
fetch the most recent n resources with which a specific user has interacted
:param n_resources: the number of resources to return.
:param days: the number of days to scan.
The reason for the parameter `days` is that the runtime of this method
is very dependent upon the days that one scans. Thus, there is a tradeoff
between reporting history and timely responsiveness of the dashboard.
"""
# TODO: document actions like labeling and commenting (currently these are 'visit's)
if today is None:
today = datetime.now()
return BaseResource.objects.filter(
variable__timestamp__gte=(today-timedelta(days)),
variable__timestamp__lt=(today),
variable__resource__isnull=False,
variable__name='visit')\
.distinct()\
.annotate(users=models.Count('variable__session__visitor__user'))\
.annotate(public=F('raccess__public'),
discoverable=F('raccess__discoverable'),
published=F('raccess__published'),
last_accessed=models.Max('variable__timestamp'))\
.order_by('-users')[:n_resources]
@classmethod
def recent_users(cls, resource, n_users=5, days=60):
"""
fetch the identities of the most recent users who have accessed a resource
:param resource: The resource to document.
:param n_users: the number of users to return.
:param days: the number of days to scan.
The reason for the parameter `days` is that the runtime of this method
is very dependent upon the number of days that one scans. Thus, there is a
tradeoff between reporting history and timely responsiveness of the dashboard.
"""
return User.objects\
.filter(visitor__session__variable__resource=resource,
visitor__session__variable__name='visit',
visitor__session__variable__timestamp__gte=(datetime.now() -
timedelta(days)))\
.distinct()\
.annotate(last_accessed=models.Max('visitor__session__variable__timestamp'))\
.filter(visitor__session__variable__timestamp=F('last_accessed'))\
.order_by('-last_accessed')[:n_users]
| bsd-3-clause | -4,601,846,985,342,638,000 | 38.804196 | 92 | 0.586174 | false |
Nesiehr/osf.io | api/preprints/views.py | 1 | 14649 | import re
from django.db.models import Q
from rest_framework import generics
from rest_framework.exceptions import NotFound, PermissionDenied, NotAuthenticated
from rest_framework import permissions as drf_permissions
from website.models import PreprintService
from framework.auth.oauth_scopes import CoreScopes
from api.base.exceptions import Conflict
from api.base.views import JSONAPIBaseView
from api.base.filters import PreprintFilterMixin
from api.base.parsers import (
JSONAPIMultipleRelationshipsParser,
JSONAPIMultipleRelationshipsParserForRegularJSON,
)
from api.base.utils import get_object_or_error, get_user_auth
from api.base import permissions as base_permissions
from api.citations.utils import render_citation, preprint_csl
from api.preprints.serializers import (
PreprintSerializer,
PreprintCreateSerializer,
PreprintCitationSerializer,
)
from api.nodes.serializers import (
NodeCitationStyleSerializer,
)
from api.nodes.views import NodeMixin, WaterButlerMixin
from api.nodes.permissions import ContributorOrPublic
from api.preprints.permissions import PreprintPublishedOrAdmin
class PreprintMixin(NodeMixin):
serializer_class = PreprintSerializer
preprint_lookup_url_kwarg = 'preprint_id'
def get_preprint(self, check_object_permissions=True):
preprint = get_object_or_error(
PreprintService,
self.kwargs[self.preprint_lookup_url_kwarg],
display_name='preprint'
)
if not preprint:
raise NotFound
# May raise a permission denied
if check_object_permissions:
self.check_object_permissions(self.request, preprint)
return preprint
class PreprintList(JSONAPIBaseView, generics.ListCreateAPIView, PreprintFilterMixin):
"""Preprints that represent a special kind of preprint node. *Writeable*.
Paginated list of preprints ordered by their `date_created`. Each resource contains a representation of the
preprint.
##Preprint Attributes
OSF Preprint entities have the "preprints" `type`.
name type description
====================================================================================
date_created iso8601 timestamp timestamp that the preprint was created
date_modified iso8601 timestamp timestamp that the preprint was last modified
date_published iso8601 timestamp timestamp when the preprint was published
is_published boolean whether or not this preprint is published
is_preprint_orphan boolean whether or not this preprint is orphaned
subjects list of lists of dictionaries ids of Subject in the BePress taxonomy. Dictionary, containing the subject text and subject ID
doi string bare DOI for the manuscript, as entered by the user
##Relationships
###Node
The node that this preprint was created for
###Primary File
The file that is designated as the preprint's primary file, or the manuscript of the preprint.
###Provider
Link to preprint_provider detail for this preprint
##Links
- `self` -- Preprint detail page for the current preprint
- `html` -- Project on the OSF corresponding to the current preprint
- `doi` -- URL representation of the DOI entered by the user for the preprint manuscript
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Preprints may be filtered by their `id`, `is_published`, `date_created`, `date_modified`, `provider`
Most are string fields and will be filtered using simple substring matching.
###Creating New Preprints
Create a new preprint by posting to the guid of the existing **node**, including the file_id for the
file you'd like to make the primary preprint file. Note that the **node id** will not be accessible via the
preprints detail view until after the preprint has been created.
Method: POST
URL: /preprints/
Query Params: <none>
Body (JSON): {
"data": {
"attributes": {},
"relationships": {
"node": { # required
"data": {
"type": "nodes",
"id": {node_id}
}
},
"primary_file": { # required
"data": {
"type": "primary_files",
"id": {file_id}
}
},
"provider": { # required
"data": {
"type": "providers",
"id": {provider_id}
}
},
}
}
}
Success: 201 CREATED + preprint representation
New preprints are created by issuing a POST request to this endpoint, along with the guid for the node to create a preprint from.
Provider defaults to osf.
#This Request/Response
"""
# These permissions are not checked for the list of preprints, permissions handled by the query
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
)
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]
serializer_class = PreprintSerializer
ordering = ('-date_created')
view_category = 'preprints'
view_name = 'preprint-list'
def get_serializer_class(self):
if self.request.method == 'POST':
return PreprintCreateSerializer
else:
return PreprintSerializer
# overrides DjangoFilterMixin
def get_default_django_query(self):
auth = get_user_auth(self.request)
auth_user = getattr(auth, 'user', None)
# Permissions on the list objects are handled by the query
default_query = Q(node__isnull=False, node__is_deleted=False)
no_user_query = Q(is_published=True, node__is_public=True)
if auth_user:
contrib_user_query = Q(is_published=True, node__contributor__user_id=auth_user.id, node__contributor__read=True)
admin_user_query = Q(node__contributor__user_id=auth_user.id, node__contributor__admin=True)
return (default_query & (no_user_query | contrib_user_query | admin_user_query))
return (default_query & no_user_query)
# overrides ListAPIView
def get_queryset(self):
return PreprintService.objects.filter(self.get_query_from_request()).distinct()
class PreprintDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, PreprintMixin, WaterButlerMixin):
"""Preprint Detail *Writeable*.
##Preprint Attributes
OSF Preprint entities have the "preprints" `type`.
name type description
====================================================================================
date_created iso8601 timestamp timestamp that the preprint was created
date_modified iso8601 timestamp timestamp that the preprint was last modified
date_published iso8601 timestamp timestamp when the preprint was published
is_published boolean whether or not this preprint is published
is_preprint_orphan boolean whether or not this preprint is orphaned
subjects array of tuples of dictionaries ids of Subject in the BePress taxonomy. Dictionary, containing the subject text and subject ID
doi string bare DOI for the manuscript, as entered by the user
##Relationships
###Node
The node that this preprint was created for
###Primary File
The file that is designated as the preprint's primary file, or the manuscript of the preprint.
###Provider
Link to preprint_provider detail for this preprint
##Links
- `self` -- Preprint detail page for the current preprint
- `html` -- Project on the OSF corresponding to the current preprint
- `doi` -- URL representation of the DOI entered by the user for the preprint manuscript
##Updating Preprints
Update a preprint by sending a patch request to the guid of the existing preprint node that you'd like to update.
Method: PATCH
URL: /preprints/{node_id}/
Query Params: <none>
Body (JSON): {
"data": {
"id": node_id,
"attributes": {
"subjects": [({root_subject_id}, {child_subject_id}), ...] # optional
"is_published": true, # optional
"doi": {valid_doi} # optional
},
"relationships": {
"primary_file": { # optional
"data": {
"type": "primary_files",
"id": {file_id}
}
}
}
}
}
Success: 200 OK + preprint representation
#This Request/Response
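    For illustration only, an update could be sent from Python roughly as below. The
    guid ("abcd3"), file id ("xyz98") and access token are placeholders, and the host
    is assumed to be the public API:
        import requests
        payload = {"data": {"id": "abcd3",
                            "attributes": {"is_published": True},
                            "relationships": {
                                "primary_file": {"data": {"type": "primary_files", "id": "xyz98"}}}}}
        requests.patch("https://api.osf.io/v2/preprints/abcd3/",
                       json=payload,
                       headers={"Authorization": "Bearer <token>",
                                "Content-Type": "application/vnd.api+json"})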
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
PreprintPublishedOrAdmin,
)
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]
serializer_class = PreprintSerializer
view_category = 'preprints'
view_name = 'preprint-detail'
def get_object(self):
return self.get_preprint()
def perform_destroy(self, instance):
if instance.is_published:
raise Conflict('Published preprints cannot be deleted.')
PreprintService.remove_one(instance)
class PreprintCitationDetail(JSONAPIBaseView, generics.RetrieveAPIView, PreprintMixin):
""" The citation details for a preprint, in CSL format *Read Only*
##PreprintCitationDetail Attributes
name type description
=================================================================================
id string unique ID for the citation
title string title of project or component
author list list of authors for the preprint
publisher string publisher - the preprint provider
type string type of citation - web
doi string doi of the resource
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = PreprintCitationSerializer
view_category = 'preprints'
view_name = 'preprint-citation'
def get_object(self):
preprint = self.get_preprint()
auth = get_user_auth(self.request)
if preprint.node.is_public or preprint.node.can_view(auth) or preprint.is_published:
return preprint_csl(preprint, preprint.node)
raise PermissionDenied if auth.user else NotAuthenticated
class PreprintCitationStyleDetail(JSONAPIBaseView, generics.RetrieveAPIView, PreprintMixin):
""" The citation for a preprint in a specific style's format. *Read Only*
##NodeCitationDetail Attributes
name type description
=================================================================================
citation string complete citation for a preprint in the given style
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeCitationStyleSerializer
view_category = 'preprint'
view_name = 'preprint-citation'
def get_object(self):
preprint = self.get_preprint()
auth = get_user_auth(self.request)
style = self.kwargs.get('style_id')
if preprint.node.is_public or preprint.node.can_view(auth) or preprint.is_published:
try:
citation = render_citation(node=preprint, style=style)
except ValueError as err: # style requested could not be found
csl_name = re.findall('[a-zA-Z]+\.csl', err.message)[0]
raise NotFound('{} is not a known style.'.format(csl_name))
return {'citation': citation, 'id': style}
raise PermissionDenied if auth.user else NotAuthenticated
| apache-2.0 | 1,803,590,385,282,628,000 | 41.46087 | 170 | 0.562837 | false |
macarthur-lab/exac_readviz_scripts | scripts/reset_failed_db_records.py | 1 | 27810 | """
This script updates database records in the interval, variant, sample tables to
recover from various kinds of transient errors fixable errors
such as cluster filesystem problems, some previously-generated files being deleted, etc.
"""
import os
import sys
from mysql.connector import MySQLConnection
from utils.constants import DB_HOST, DB_PORT, DB_USER
INTERVALS_TABLE = 'python2_pipeline_i1000000'
# initialize flags that control which sections are actually executed
reset_samples_with_transient_error = 0
reset_variants_with_transient_errors = 0
reset_variants_with_fewer_than_expected_available_samples = 0
reset_variants_with_original_bams_marked_missing_due_to_transient_error = 0
reset_variants_with_bams_in_db_but_not_on_disk = 0
reset_intervals_that_had_error_code = 0
reset_variants_that_contain_unfinished_samples = 0
reset_intervals_that_contain_unfinished_variants = 0
reset_intervals_that_contain_unfinished_samples = 0
reset_unfinished_intervals_to_clear_job_id = 0
reset_unfinished_samples_in_finished_chroms = 0
run_stat_queries = 0
set_intervals_where_all_contained_variants_have_finished = 0
reset_unfinished_intervals_in_important_genes = 0
reset_samples_with_original_bams_marked_missing_due_to_transient_error = 0
# set flags to execute particular sections of code
#reset_variants_with_transient_errors = 1
#reset_variants_with_fewer_than_expected_available_samples = 1
#reset_variants_with_original_bams_marked_missing_due_to_transient_error = 1
#reset_variants_with_bams_in_db_but_not_on_disk = 1
#reset_variants_that_contain_unfinished_samples = 1
#reset_intervals_that_contain_unfinished_variants = 1
#reset_intervals_that_had_error_code = 1
reset_samples_with_transient_error = 1
reset_samples_with_original_bams_marked_missing_due_to_transient_error = 1
reset_unfinished_samples_in_finished_chroms = 1
reset_intervals_that_contain_unfinished_samples = 1
#reset_unfinished_intervals_to_clear_job_id = 1
#run_stat_queries = 1
print("connecting to db")
conn = MySQLConnection(user=DB_USER, host=DB_HOST, port=DB_PORT, database='exac_readviz')
c = conn.cursor(buffered=True)
def print_query(q):
print(q)
def run_query(q):
print('---------')
print(q)
#if not q.lower().strip().startswith("select"):
# return
c.execute(q)
if not q.lower().strip().startswith("select"):
conn.commit()
print("%d rows updated" % c.rowcount)
else:
print("%d rows returned" % c.rowcount)
return c
"""
+----------------------------+--------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+----------------------------+--------------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| chrom | varchar(5) | NO | MUL | NULL | |
| pos | int(11) | NO | | NULL | |
| ref | varchar(500) | NO | | NULL | |
| alt | varchar(500) | NO | | NULL | |
| het_or_hom | varchar(3) | NO | | NULL | |
| finished | tinyint(1) | NO | | NULL | |
| sample_id | varchar(50) | NO | | NULL | |
| sample_i | int(11) | YES | | NULL | |
| original_bam_path | longtext | YES | | NULL | |
| original_gvcf_path | longtext | YES | | NULL | |
| output_bam_path | longtext | YES | | NULL | |
| is_missing_original_gvcf | tinyint(1) | NO | | NULL | |
| calling_interval_start | int(11) | YES | | NULL | |
| calling_interval_end | int(11) | YES | | NULL | |
| hc_succeeded | tinyint(1) | NO | | NULL | |
| hc_error_code | int(11) | YES | | NULL | |
| hc_error_text | longtext | YES | | NULL | |
| hc_n_artificial_haplotypes | int(11) | YES | | NULL | |
| hc_started_time | datetime | YES | | NULL | |
| hc_finished_time | datetime | YES | | NULL | |
| hc_command_line | longtext | YES | | NULL | |
+----------------------------+--------------+------+-----+---------+----------------+
"""
if reset_variants_with_original_bams_marked_missing_due_to_transient_error:
print("=== reset_variants_with_original_bams_marked_missing_due_to_transient_error ===")
print("step 1: Find bams that caused a file-doesn't-exist error, but actually do exist")
c.execute("select distinct original_bam_path from sample where hc_error_code=1000")
all_original_bam_paths = list(c.fetchall())
print("Found %d distinct bams that caused 1000 error" % len(all_original_bam_paths))
found_bam_paths = tuple([p[0] for p in all_original_bam_paths if os.path.isfile(p[0])])
print("Of these, %d actually exist on disk. Reset records with missing-bam errors to finished=0 for bams in this list" % len(found_bam_paths))
if found_bam_paths:
pass
#run_query(("update variant as v join sample as s on "
# "v.chrom=s.chrom and v.pos=s.pos and v.ref=s.ref and v.alt=s.alt and v.het_or_hom_or_hemi=s.het_or_hom_or_hemi "
# "set v.finished=0, v.comments=NULL, n_available_samples=NULL, n_expected_samples=NULL, readviz_bam_paths=NULL "
# "where v.n_available_samples>=0 and v.n_available_samples<v.n_expected_samples and "
# "s.hc_error_code IN (1000, 1010) and s.original_bam_path IN %s") % str(found_bam_paths).replace(',)', ')'))
#run_query(("update sample as s join variant as v on "
# "v.chrom=s.chrom and v.pos=s.pos and v.ref=s.ref and v.alt=s.alt and v.het_or_hom_or_hemi=s.het_or_hom_or_hemi "
# "set s.finished=0, s.comments=NULL, hc_succeeded=0, hc_error_code=NULL, hc_error_text=NULL, sample_i=NULL, original_bam_path=NULL, original_gvcf_path=NULL, output_bam_path=NULL, hc_command_line=NULL "
# "where v.n_available_samples>=0 and v.n_available_samples<v.n_expected_samples and "
# "s.hc_error_code IN (1000, 1010) and s.original_bam_path IN %s") % str(found_bam_paths).replace(',)', ')'))
if reset_samples_with_original_bams_marked_missing_due_to_transient_error:
print("=== reset_samples_with_original_bams_marked_missing_due_to_transient_error ===")
print("step 1: Find bams that caused a file-doesn't-exist error, but actually do exist")
c.execute("select distinct original_bam_path from sample where hc_error_code=1000")
all_original_bam_paths = list(c.fetchall())
print("Found %d distinct bams that caused 1000 error" % len(all_original_bam_paths))
found_bam_paths = tuple([p[0] for p in all_original_bam_paths if os.path.isfile(p[0])])
print("Of these, %d actually exist on disk. Reset records with missing-bam errors to finished=0 for bams in this list" % len(found_bam_paths))
if found_bam_paths:
run_query("update sample set started=0, started_time=NULL, finished=0, finished_time=NULL, hc_succeeded=0, hc_error_text=NULL, hc_error_code=NULL, comments=NULL "
"where hc_error_code=1000 and original_bam_path IN %s" % str(found_bam_paths).replace(',)', ')').replace("u'", "'"))
if reset_variants_with_transient_errors:
print("=== reset_variants_with_transient_errors ===")
print("For *samples* with transient errors, reset them to finished=0")
run_query(("update sample as s join variant as v on "
"v.chrom=s.chrom and v.pos=s.pos and v.ref=s.ref and v.alt=s.alt and v.het_or_hom_or_hemi=s.het_or_hom_or_hemi "
"set s.comments=NULL, s.finished=0, hc_succeeded=0, hc_error_code=NULL, hc_error_text=NULL, sample_i=NULL, original_bam_path=NULL, original_gvcf_path=NULL, output_bam_path=NULL, hc_command_line=NULL "
"where (s.hc_error_code IN (2001, 2011, 2009, 2019, 2021, 4000) or (s.hc_error_code is NULL and s.hc_succeeded=0)) " # 3001,
"and v.n_available_samples>=0 and v.n_available_samples<v.n_expected_samples"))
print("For *variants* with transient errors, reset them to finished=0")
run_query(("update variant as v join sample as s on "
"v.chrom=s.chrom and v.pos=s.pos and v.ref=s.ref and v.alt=s.alt and v.het_or_hom_or_hemi=s.het_or_hom_or_hemi "
"set v.comments=NULL, v.finished=0, n_available_samples=NULL, n_expected_samples=NULL, readviz_bam_paths=NULL "
"where (s.hc_error_code IN (2001, 2011, 2009, 2019, 2021, 4000) or (s.hc_error_code is NULL and s.hc_succeeded=0)) " # 3001,
"and v.n_available_samples>=0 and v.n_available_samples<v.n_expected_samples"))
ALL_CHROMS = list(map(str, [1,10,11,12,13,14,15,16,17,18,19,2,20,21,22,3,4,5,6,7,8,9, 'X','Y']))
FINISHED_CHROMS = list(map(str, [12,14,15,7,"X"])) #21,22,3,4,5,6,7,8,9, 'X','Y']))
FINISHED_CHROMS_STRING = str(tuple(map(str, FINISHED_CHROMS))).replace(",)", ")") #
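# For illustration: with FINISHED_CHROMS = ['12', '14'], the expression above yields
# the SQL IN-list string "('12', '14')". The .replace(",)", ")") only matters for a
# single-element list, where str(tuple(...)) would otherwise produce "('12',)".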
# reset unfinished intervals not in top genes and not in genomic regions
if reset_unfinished_intervals_in_important_genes:
print("=== reset_unfinished_intervals_in_important_genes ===")
print("Reset intervals in important genes")
sql_is_overlapping = []
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), "data/top_intervals.txt")) as f:
for line in f:
i_name, i_chrom, i_start, i_end = line.strip("\n").split("\t")
sql_is_overlapping.append( "(chrom='%(i_chrom)s' and start_pos <= %(i_end)s and end_pos>=%(i_start)s )" % locals() )
if len(sql_is_overlapping) > 300:
break
run_query(("select * from " + INTERVALS_TABLE +
"where finished=0 and ( %s )") % " or ".join(sql_is_overlapping))
# enable intervals that are overlapping the intervals of interest
print("Reset intervals overlapping intervals of interest")
run_query(("update " + INTERVALS_TABLE + " set job_id=NULL, comments=NULL, task_id=NULL, unique_id=NULL "
"where (job_id is NULL or job_id = -1 ) and finished=0 and ( %s )") % " or ".join(sql_is_overlapping))
if run_stat_queries or set_intervals_where_all_contained_variants_have_finished or reset_intervals_that_contain_unfinished_variants or reset_intervals_that_contain_unfinished_samples:
"""
+---------------------+--------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+---------------------+--------------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| chrom | varchar(5) | NO | MUL | NULL | |
| pos | int(11) | NO | | NULL | |
| ref | varchar(500) | NO | | NULL | |
| alt | varchar(500) | NO | | NULL | |
| het_or_hom | varchar(3) | NO | | NULL | |
| finished | tinyint(1) | NO | | NULL | |
| n_expected_samples | int(11) | YES | | NULL | |
| n_available_samples | int(11) | YES | | NULL | |
| readviz_bam_paths | longtext | YES | | NULL | |
+---------------------+--------------+------+-----+---------+----------------+
"""
# python3_4_generate_HC_bams_py_i200
# get all intervals
from collections import defaultdict
all_intervals = defaultdict(set)
c = run_query("select chrom, start_pos, end_pos, started, finished, error_code from %(INTERVALS_TABLE)s" % locals())
for chrom, start_pos, end_pos, started, finished, error_code in c:
all_intervals[chrom].add((chrom, int(start_pos), int(end_pos), int(started), int(finished), int(error_code)))
all_finished_intervals = defaultdict(set)
c = run_query("select chrom, start_pos, end_pos, started, finished, error_code from %(INTERVALS_TABLE)s where started=1 and finished=1" % locals())
for chrom, start_pos, end_pos, started, finished, error_code in c:
all_finished_intervals[chrom].add((chrom, int(start_pos), int(end_pos), int(started), int(finished), int(error_code)))
total = finished_count = 0
for chrom in all_intervals:
finished_count += len(all_finished_intervals[chrom])
total += len(all_intervals[chrom])
print("%s: %s finished out of %s intervals" % (chrom, len(all_finished_intervals[chrom]), len(all_intervals[chrom])))
print("total: %s finished out of %s total intervals" % (finished_count, total))
# set intervals as finished if all variants in them are finished
if set_intervals_where_all_contained_variants_have_finished:
# unlike the other recovery code
# this is useful for rebuilding the intervals table from scratch by setting
# each interval record to finished if all variants it contains are finished
print("=== set_intervals_where_all_contained_variants_have_finished ===")
for current_chrom in ALL_CHROMS:
#all_intervals[chrom].add((chrom, int(start_pos), int(end_pos), int(started), int(finished), int(error_code)))
for _, start_pos, end_pos, _, _, _ in all_intervals[current_chrom]:
c = run_query("select chrom, pos from variant as v where chrom='%(current_chrom)s' and pos >= %(start_pos)s and pos <= %(end_pos)s and finished=0" % locals())
if c.rowcount > 0:
print("Found %s unfinished variants in %s:%s-%s. Skipping" % (c.rowcount, current_chrom, start_pos, end_pos))
else:
run_query(("update " + INTERVALS_TABLE + " set job_id=1, started=1, finished=1 "
"where chrom='%(current_chrom)s' and start_pos=%(start_pos)s and end_pos=%(end_pos)s") % locals())
if reset_variants_with_fewer_than_expected_available_samples:
print("=== reset_variants_with_fewer_than_expected_available_samples ===")
# Reset samples to finished = 0 where hc_u
run_query(("update sample as s join variant as v on "
"v.chrom=s.chrom and v.pos=s.pos and v.ref=s.ref and v.alt=s.alt and v.het_or_hom_or_hemi=s.het_or_hom_or_hemi "
"set s.finished=0, s.comments=NULL, hc_succeeded=0, hc_error_code=NULL, hc_error_text=NULL, sample_i=NULL, original_bam_path=NULL, original_gvcf_path=NULL, output_bam_path=NULL, hc_command_line=NULL "
"where v.n_available_samples>=0 and v.n_available_samples<v.n_expected_samples and "
"v.finished=0 and s.finished=1 and s.hc_succeeded=0"))
# Reset variants to finished = 0 where number of records in the sample table that either succeeded or had an error is < n_expected_samples
run_query(("update variant as v "
"set v.finished=0, v.comments=NULL, n_available_samples=NULL, n_expected_samples=NULL, readviz_bam_paths=NULL "
"where n_available_samples<n_expected_samples and "
"n_expected_samples > ("
" select count(*) from sample as s where chrom=v.chrom and pos=v.pos and alt=v.alt and het_or_hom_or_hemi=v.het_or_hom_or_hemi and (hc_succeeded=1 or hc_error_code>0)"
")"))
# Reset variants to finished = 0 where the variant.n_available_samples < records in the sample table that have hc_succeeded=1
run_query(("update variant as v "
"set v.finished=0, v.comments=NULL, n_available_samples=NULL, n_expected_samples=NULL, readviz_bam_paths=NULL "
"where n_available_samples<n_expected_samples and n_available_samples < ("
" select count(*) from sample as s where chrom=v.chrom and pos=v.pos and ref=v.ref and alt=v.alt and het_or_hom_or_hemi=v.het_or_hom_or_hemi and hc_succeeded=1"
")"))
if reset_variants_with_bams_in_db_but_not_on_disk:
print("=== reset_variants_with_bams_in_db_but_not_on_disk ===")
import glob
os.chdir("/broad/hptmp/exac_readviz_backend/")
for current_chrom in ALL_CHROMS:
print("globbing for all bam files in chr%s" % current_chrom)
actual_files_on_disk = set(glob.glob(current_chrom +"/*/chr*.bam"))
for t in run_query("select readviz_bam_paths, chrom, pos, ref, alt, het_or_hom_or_hemi from variant "
"where chrom='%(current_chrom)s' and finished=1 and n_available_samples>0" % locals()).fetchall():
cached_filenames_list = t[0].split('|')
cached_filenames_set = set(cached_filenames_list)
duplicates_found = len(cached_filenames_set) < len(cached_filenames_list)
some_cached_files_not_found_on_disk = len(cached_filenames_set - actual_files_on_disk) > 0
if duplicates_found or some_cached_files_not_found_on_disk:
if some_cached_files_not_found_on_disk:
print('readviz_bam_paths that are no longer found on disk: %s ' % str(cached_filenames_set - actual_files_on_disk))
else:
print('duplicates_found: %s' % str(cached_filenames_list))
run_query("update variant as v "
"set v.finished=0, v.comments=NULL, n_available_samples=NULL, n_expected_samples=NULL, readviz_bam_paths=NULL "
"where chrom='%s' and pos=%s and ref='%s' and alt='%s' and het_or_hom_or_hemi='%s' " % t[1:])
run_query("update sample set started=0, started_time=NULL, finished=0, finished_time=NULL, hc_succeeded=0, hc_error_text=NULL, hc_error_code=NULL, comments=NULL "
"where chrom='%s' and pos=%s and ref='%s' and alt='%s' and het_or_hom_or_hemi='%s' " % t[1:])
if reset_variants_that_contain_unfinished_samples:
print("=== reset_variants_that_contain_unfinished_samples ===")
run_query(("update sample as s join variant as v on "
"v.chrom=s.chrom and v.pos=s.pos and v.ref=s.ref and v.alt=s.alt and v.het_or_hom_or_hemi=s.het_or_hom_or_hemi "
"set s.finished=0, s.comments=NULL, hc_succeeded=0, hc_error_code=NULL, hc_error_text=NULL, sample_i=NULL, original_bam_path=NULL, original_gvcf_path=NULL, output_bam_path=NULL, hc_command_line=NULL "
"where v.n_available_samples>=0 and v.n_available_samples<v.n_expected_samples and "
"s.finished=0"))
run_query(("update variant as v join sample as s on "
"v.chrom=s.chrom and v.pos=s.pos and v.ref=s.ref and v.alt=s.alt and v.het_or_hom_or_hemi=s.het_or_hom_or_hemi "
"set v.finished=0, v.comments=NULL, n_available_samples=NULL, n_expected_samples=NULL, readviz_bam_paths=NULL "
"where s.finished=0"))
if reset_intervals_that_contain_unfinished_variants:
print("=== reset_intervals_that_contain_unfinished_variants ===")
for current_chrom in FINISHED_CHROMS:
c = run_query("select chrom, pos from variant as v where chrom='%(current_chrom)s' and v.finished=0 order by pos asc" % locals())
all_unfinished_variants = c.fetchall()
unfinished_intervals = set()
current_interval = None
print("Checking for unfinished_intervals using %s unfinished variants in chr%s" % (len(all_unfinished_variants), current_chrom))
for chrom, pos in all_unfinished_variants:
pos = int(pos)
if current_interval is None or current_interval[0] != chrom or pos < current_interval[1] or pos > current_interval[2]:
for i in all_intervals[chrom]:
if i[1] <= pos and pos <= i[2]:
current_interval = i
sys.stdout.write("Found matching interval %s for variant: %s" % (i, "%s:%s" % (chrom, pos)))
if i[3] > 0 or i[4] > 0:
unfinished_intervals.add(i)
sys.stdout.write(". Will reset it..\n")
else:
sys.stdout.write(". It's already marked as not started.\n")
#print("%s: %s" % (len(unfinished_intervals), i))
#print("%(chrom)s %(pos)s is in interval %(i)s" % locals())
break
else:
raise ValueError("%(chrom)s-%(pos)s is not in any intervals" % locals())
#else:
#print("%(chrom)s %(pos)s is in same interval %(current_interval)s" % locals())
print("Found %s unfinished intervals in %s" % (len(unfinished_intervals), current_chrom))
print("Updating intervals")
for i in unfinished_intervals:
chrom = i[0]
start_pos = i[1]
end_pos = i[2]
run_query(("update " + INTERVALS_TABLE +
"set job_id=NULL, comments=NULL, unique_id=NULL, task_id=NULL, started=0, started_date=NULL, error_message=NULL, error_code=0, finished=0, error_message=NULL "
"where chrom='%(chrom)s' and start_pos=%(start_pos)s and end_pos=%(end_pos)s") % locals())
#print_query("update python3_4_generate_HC_bams_py_i200 set "
# "job_id=NULL, task_id=NULL, unique_id=NULL, started=0, "
# "started_date=NULL, finished=0, finished_date=NULL, "
# "error_code=500, error_message=NULL where finished=0 and started_date <")
if reset_samples_with_transient_error:
run_query(("update sample set started=0, started_time=NULL, finished=0, finished_time=NULL, hc_succeeded=0, hc_error_text=NULL, hc_error_code=NULL, comments=NULL "
"where hc_error_code >= 2000 and hc_error_code < 3000 and chrom in %(FINISHED_CHROMS_STRING)s") % locals())
if reset_unfinished_samples_in_finished_chroms:
run_query("update sample set started=0, started_time=NULL, finished=0, finished_time=NULL, hc_succeeded=0, hc_error_text=NULL, hc_error_code=NULL, comments=NULL "
"where chrom in %(FINISHED_CHROMS_STRING)s and started in (0, 1) and finished=0" % locals())
if reset_intervals_that_contain_unfinished_samples:
print("=== reset_intervals_that_contain_unfinished_samples ===")
for current_chrom in FINISHED_CHROMS:
c = run_query("select chrom, pos from sample as s where chrom='%(current_chrom)s' and s.started in (0, 1) and s.finished=0 order by pos asc" % locals())
all_unfinished_samples = c.fetchall()
unfinished_intervals = set()
current_interval = None
print("Checking for unfinished_intervals using %s unfinished samples in chr%s" % (len(all_unfinished_samples), current_chrom))
for chrom, pos in all_unfinished_samples:
pos = int(pos)
if current_interval is None or current_interval[0] != chrom or pos < current_interval[1] or pos > current_interval[2]:
for i in all_intervals[chrom]:
if i[1] <= pos and pos <= i[2]:
current_interval = i
sys.stdout.write("Found matching interval %s for variant: %s" % (i, "%s:%s" % (chrom, pos)))
if not (i[3] == 0 and i[4] == 0): # reset intervals that are not started or not finished
unfinished_intervals.add(i)
sys.stdout.write(". Will reset it..\n")
else:
sys.stdout.write(". It's already marked as not started.\n")
break
else:
raise ValueError("%(chrom)s-%(pos)s is not in any intervals" % locals())
#else:
#print("%(chrom)s %(pos)s is in same interval %(current_interval)s" % locals())
print("Found %s unfinished intervals in %s" % (len(unfinished_intervals), current_chrom))
print("Updating intervals")
for i in unfinished_intervals:
chrom = i[0]
start_pos = i[1]
end_pos = i[2]
run_query(("update %(INTERVALS_TABLE)s "
"set job_id=null, task_id=null, unique_id=null, started=0, started_date=null, finished=0, finished_date=null, "
"error_code=0, error_message=null, priority=null, username=null, machine_hostname=null, machine_average_load=null, comments=null "
"where chrom='%(chrom)s' and start_pos=%(start_pos)s and end_pos=%(end_pos)s") % locals())
#print_query("update python3_4_generate_HC_bams_py_i200 set "
# "job_id=NULL, task_id=NULL, unique_id=NULL, started=0, "
# "started_date=NULL, finished=0, finished_date=NULL, "
# "error_code=500, error_message=NULL where finished=0 and started_date <")
if reset_intervals_that_had_error_code:
print("=== reset_intervals_that_had_error_code ===")
run_query("update " + INTERVALS_TABLE +
"set job_id=null, task_id=null, unique_id=null, started=0, started_date=null, finished=0, finished_date=null, "
"error_code=0, error_message=null, priority=null, username=null, machine_hostname=null, machine_average_load=null, comments=null "
"where error_code > 0")
if reset_unfinished_intervals_to_clear_job_id:
print("=== reset_unfinished_intervals_to_clear_job_id ===")
run_query("update " + INTERVALS_TABLE +
"set job_id=null, task_id=null, unique_id=null, started=0, started_date=null, finished=0, finished_date=null, "
"error_code=0, error_message=null, priority=null, username=null, machine_hostname=null, machine_average_load=null, comments=null "
"where finished=0")
print("Done")
"""
+----------------------+--------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+----------------------+--------------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| chrom | varchar(5) | NO | MUL | NULL | |
| start_pos | int(11) | NO | | NULL | |
| end_pos | int(11) | NO | | NULL | |
| job_id | int(11) | YES | MUL | NULL | |
| task_id | int(11) | YES | | NULL | |
| unique_id | int(11) | YES | | NULL | |
| started | tinyint(1) | NO | MUL | NULL | |
| started_date | datetime | YES | MUL | NULL | |
| finished | tinyint(1) | NO | MUL | NULL | |
| finished_date | datetime | YES | | NULL | |
| error_code | int(11) | NO | MUL | NULL | |
| error_message | longtext | YES | | NULL | |
| machine_hostname | varchar(100) | YES | | NULL | |
| machine_average_load | float | YES | | NULL | |
+----------------------+--------------+------+-----+---------+----------------+
"""
c.close()
conn.close()
| mit | -1,797,212,239,246,498,000 | 61.918552 | 216 | 0.57091 | false |
parkrrr/skybot | plugins/stock.py | 1 | 1208 | import json, urllib2
from util import hook, http
@hook.command
def stock(inp):
'''.stock <symbol> -- gets stock information'''
symbols = inp.split(',')
# who needs more than 3 symbols
url = ('http://finance.google.com/finance/info?client=ig&q=%s' % ','.join(symbols[:3]))
try:
raw = http.get(url)
except urllib2.HTTPError as err:
if err.code == 400:
return "unknown ticker symbol %s" % inp
else:
return "error %s while retrieving data" % err.code
# remove the comment stuff
fixed = raw.replace('//', '')
parsed = json.loads(fixed)
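    # Illustrative only (not a captured response): after stripping the "//" prefix,
    # parsed is assumed to be a list of dicts with at least the keys used below,
    # e.g. [{"t": "AAPL", "l": "95.22", "c": "0.36", "cp": "0.38"}, ...]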
s = []
for q in parsed:
        quote = q  # work on the quote currently being iterated
change = float(q['c'])
quote['Name'] = q['t']
quote['ChangePercentage'] = q['cp']
quote['LastTradePriceOnly'] = "%.2f" % float(q['l'])
quote['Change'] = ("+%.2f" % change) if change >= 0 else change
if change < 0:
quote['color'] = "5"
else:
quote['color'] = "3"
ret = "%(Name)s - %(LastTradePriceOnly)s \x03%(color)s%(Change)s (%(ChangePercentage)s%%)\x03" % quote
s.append(ret)
return ', '.join(s)
| unlicense | 2,358,894,156,922,569,000 | 27.761905 | 110 | 0.52649 | false |
onjin/docker | sentry/sentry.conf.py | 1 | 1182 | # /etc/sentry.conf.py
import os
dbname = os.environ.get('SENTRY_DBNAME', 'sentry')
dbuser = os.environ.get('SENTRY_DBUSER', 'sentry')
dbpass = os.environ.get('SENTRY_DBPASS', 'sentry')
dbhost = os.environ.get('SENTRY_DBHOST', '127.0.0.1')
dbport = os.environ.get('SENTRY_DBPORT', '')
sentry_workers = os.environ.get('SENTRY_WORKERS', 3)
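# For illustration (image name and credentials are placeholders), these variables
# might be supplied when starting the container, e.g.:
#   docker run -e SENTRY_DBHOST=postgres -e SENTRY_DBUSER=sentry \
#              -e SENTRY_DBPASS=secret -e SENTRY_WORKERS=5 <sentry-image>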
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': dbname,
'USER': dbuser,
'PASSWORD': dbpass,
'HOST': dbhost,
'PORT': dbport,
}
}
# No trailing slash!
SENTRY_URL_PREFIX = 'http://localhost:7365'
# SENTRY_KEY is a unique randomly generated secret key for your server, and it
# acts as a signing token
SENTRY_KEY = '0123456789abcde'
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 7365
SENTRY_WEB_OPTIONS = {
'workers': int(sentry_workers), # the number of gunicorn workers
'secure_scheme_headers': {'X-FORWARDED-PROTO': 'https'},
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
ALLOWED_HOSTS = ['localhost', ]
| bsd-2-clause | 2,162,608,814,997,409,800 | 25.863636 | 78 | 0.664129 | false |
djsilenceboy/LearnTest | Python_Test/PySample1/com/djs/learn/metaclass/TestMetaSerialize.py | 1 | 1732 | '''
Created on Apr 15, 2016
@author: dj
'''
import json
registry = {}
def register_class(target_class):
print("register_class: class name =", target_class.__name__)
registry[target_class.__name__] = target_class
class Serializable(object):
def __init__(self, *args):
print("Serializable: {0}.{1}".format(self.__class__.__name__, args))
self.args = args
def serialize(self):
return json.dumps({
"class": self.__class__.__name__,
"args": self.args})
def deserialize(data):
params = json.loads(data)
print("deserialize: params =", params)
name = params["class"]
target_class = registry[name]
return target_class(*params["args"])
class Meta(type):
def __new__(cls, name, baseclass, class_dict):
print("Meta.meta = {}".format(cls))
print("Meta.name = {}".format(name))
print("Meta.baseclass = {}".format(baseclass))
print("Meta.class_dict = {}".format(class_dict))
child_cls = super().__new__(cls, name, baseclass, class_dict)
register_class(child_cls)
return child_cls
class RegisteredSerializable(Serializable, metaclass=Meta):
pass
class Point3D(RegisteredSerializable):
def __init__(self, x, y, z):
super().__init__(x, y, z)
self.x = x
self.y = y
self.z = z
print("Point3D: x, y, z = {}, {}, {}".format(x, y, z))
point1 = Point3D(10, 11, 12)
print("point1 =", point1)
print("-" * 40)
data = point1.serialize()
print("data =", data)
print("-" * 40)
point2 = deserialize(data)
print("point2 =", point2)
print("-" * 40)
point3 = Point3D(100, 111, 122)
print("point3 =", point3)
if __name__ == '__main__':
pass
| apache-2.0 | 9,222,040,938,374,815,000 | 19.619048 | 76 | 0.57679 | false |
pyspace/test | pySPACE/missions/nodes/spatial_filtering/ica.py | 1 | 6780 | """ Independent Component Analysis variants """
import os
import cPickle
from copy import deepcopy
import numpy
try:
import mdp
from mdp.nodes import FastICANode
except:
pass
from pySPACE.missions.nodes.spatial_filtering.spatial_filtering import SpatialFilteringNode
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.tools.filesystem import create_directory
import logging
try:
class FastICANodeWrapper(FastICANode):
# The only reason for this node is to deal with the fact
# that the ICANode super class does not accept the output_dim kwarg
def __init__(self, trainable=True,*args, **kwargs):
if "output_dim" in kwargs:
kwargs.pop("output_dim")
if trainable==False:
self._trainable=False
super(FastICANodeWrapper, self).__init__(*args, **kwargs)
def is_training(self):
"""Return True if the node is in the training phase,
False otherwise."""
return self._training
except:
print "import failed"
class ICAWrapperNode(SpatialFilteringNode): #, FastICANodeWrapper):
""" Wrapper around the Independent Component Analysis filtering of mdp
This Node implements the unsupervised independent component
analysis algorithm for spatial filtering.
**Parameters**
:retained_channels: Determines how many of the ICA pseudo channels
are retained. Default is None which means "all channels".
(*optional, default: None*)
:load_path: An absolute path from which the ICA filter
is loaded.
If not specified, this matrix is learned from the training data.
(*optional, default: None*)
**Exemplary Call**
.. code-block:: yaml
-
node : ICA
parameters:
retained_channels : 42
"""
def __init__(self, retained_channels=None, load_path = None, **kwargs):
# Must be set before constructor of superclass is set
self.trainable = (load_path == None)
if "output_dim" in kwargs:
kwargs.pop("output_dim")
super(ICAWrapperNode, self).__init__(**kwargs)
# Load filters from file if requested
wrapped_node=None
if load_path != None:
filters_file = open(load_path, 'r')
filters, white, whitened = cPickle.load(filters_file)
wrapped_node = FastICANodeWrapper(trainable=False)
wrapped_node.filters=filters
wrapped_node.white = white
wrapped_node.whitened = whitened
wrapped_node._training = False
wrapped_node._train_phase = -1
wrapped_node._train_phase_started = False
self.set_permanent_attributes(filters=filters, white=white,
whitened=whitened)
self.set_permanent_attributes(# The number of channels that will be retained
retained_channels=retained_channels,
# Determine whether this node is trainable
trainable=(load_path == None),
output_dim=retained_channels,
new_channel_names = None,
channel_names = None,
wrapped_node=wrapped_node)
def is_trainable(self):
""" Returns whether this node is trainable. """
return self.trainable
def is_supervised(self):
""" Returns whether this node requires supervised training. """
return False
def train(self, data, label = None):
super(ICAWrapperNode, self).train(data)
def _train(self, data, label = None):
""" Uses *data* to learn a decomposition into independent components."""
# We simply ignore the class label since we
# are doing unsupervised learning
if self.channel_names is None:
self.channel_names = data.channel_names
if self.wrapped_node is None:
self.wrapped_node = FastICANode()
self.wrapped_node.train(data)
def _execute(self, data):
""" Execute learned transformation on *data*.
Changes the base of the space in which the data is located so
that the dimensions correspond to independent components
"""
# If this is the first data sample we obtain
if self.retained_channels == None:
# Count the number of channels
self.set_permanent_attributes(retained_channels = data.shape[1])
if self.channel_names is None:
self.channel_names = data.channel_names
if len(self.channel_names)<self.retained_channels:
self.retained_channels = len(self.channel_names)
self._log("To many channels chosen for the retained channels! Replaced by maximum number.",level=logging.CRITICAL)
if not(self.output_dim==self.retained_channels):
# overwrite internal output_dim variable, since it is set wrong
self._output_dim = self.retained_channels
projected_data = self.wrapped_node.execute(data.view(numpy.ndarray)) #super(ICAWrapperNode, self)._execute(data)
# Select the channels that should be retained
# Note: We have to create a new array since otherwise the removed
# channels remains in memory
projected_data = numpy.array(projected_data[:, :self.retained_channels])
if self.new_channel_names is None:
self.new_channel_names = ["ica%03d" % i
for i in range(projected_data.shape[1])]
return TimeSeries(projected_data, self.new_channel_names,
data.sampling_frequency, data.start_time,
data.end_time, data.name, data.marker_name)
def store_state(self, result_dir, index=None):
""" Stores this node in the given directory *result_dir*. """
if self.store:
node_dir = os.path.join(result_dir, self.__class__.__name__)
create_directory(node_dir)
# This node only stores the learned eigenvector and eigenvalues
name = "%s_sp%s.pickle" % ("filters", self.current_split)
result_file = open(os.path.join(node_dir, name), "wb")
result_file.write(cPickle.dumps((self.wrapped_node.filters, self.wrapped_node.white, self.wrapped_node.whitened),
protocol=2))
result_file.close()
def get_filter(self):
return self.get_projmatrix()
_NODE_MAPPING = {"ICA": ICAWrapperNode}
| gpl-3.0 | 8,658,471,178,082,416,000 | 38.649123 | 126 | 0.60118 | false |