content (stringlengths 7–928k) | avg_line_length (float64 3.5–33.8k) | max_line_length (int64 6–139k) | alphanum_fraction (float64 0.08–0.96) | licenses (sequence) | repository_name (stringlengths 7–104) | path (stringlengths 4–230) | size (int64 7–928k) | lang (stringclasses 1 value)
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
import pyeapi
import yaml
from getpass import getpass
from pprint import pprint
from jinja2 import Template
## Loading the yaml file
with open("arista_connect1.yml") as f:
device_dict = yaml.load(f)
new_list = []
### Keys in the dictionary stored in a list
for k in device_dict.keys():
    new_list.append(k)
### data and connect for 4 arista switches
intf_vars = {}
connect_dict = {}
arista_1 = device_dict[new_list[0]]
arista_2 = device_dict[new_list[1]]
arista_3 = device_dict[new_list[2]]
arista_4 = device_dict[new_list[3]]
for k, v in arista_1.items():
    if k == 'data':
        intf_vars = arista_1[k]
    else:
        connect_dict[k] = arista_1[k]
connection = pyeapi.client.connect(**connect_dict,password=getpass())
device = pyeapi.client.Node(connection)
interface_config = '''
interface {{ intf_name }}
ip address {{ intf_ip }}/{{ intf_mask }}
'''
j2_template = Template(interface_config)
output = j2_template.render(**intf_vars)
cfg = output.strip('\n').split('\n')
out = device.config(cfg)
print(out)
show_ip_int = device.enable("show ip interface brief")
pprint(show_ip_int)
#### For arista switch 2
for k, v in arista_2.items():
    if k == 'data':
        intf_vars = arista_2[k]
    else:
        connect_dict[k] = arista_2[k]
connection = pyeapi.client.connect(**connect_dict,password=getpass())
device = pyeapi.client.Node(connection)
output = j2_template.render(**intf_vars)
cfg = output.strip('\n').split('\n')
out = device.config(cfg)
print(out)
show_ip_int = device.enable("show ip interface brief")
pprint(show_ip_int)
### Arista switch 3
for k, v in arista_3.items():
    if k == 'data':
        intf_vars = arista_3[k]
    else:
        connect_dict[k] = arista_3[k]
connection = pyeapi.client.connect(**connect_dict,password=getpass())
device = pyeapi.client.Node(connection)
output = j2_template.render(**intf_vars)
cfg = output.strip('\n').split('\n')
out = device.config(cfg)
print(out)
show_ip_int = device.enable("show ip interface brief")
pprint(show_ip_int)
#### For arista switch 4
for k, v in arista_4.items():
    if k == 'data':
        intf_vars = arista_4[k]
    else:
        connect_dict[k] = arista_4[k]
connection = pyeapi.client.connect(**connect_dict,password=getpass())
device = pyeapi.client.Node(connection)
output = j2_template.render(**intf_vars)
cfg = output.strip('\n').split('\n')
out = device.config(cfg)
print(out)
show_ip_int = device.enable("show ip interface brief")
pprint(show_ip_int)
| 29.505747 | 69 | 0.701987 | [
"Apache-2.0"
] | ramyacr97/RamyaPython | arista_exercise/ex4_example.py | 2,567 | Python |
#!C:\Users\Claudia\PycharmProjects\BlackJack\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| 32.384615 | 70 | 0.672209 | [
"MIT"
] | hillc255/BlackJack | venv/Scripts/pip3.7-script.py | 421 | Python |
# -*- coding: utf-8 -*-
"""
Source: https://github.com/awesto/django-shop/blob/12e246b356dbc1bc5bbdc8f056e3cb109c617997/shop/money/__init__.py
"""
from .money_maker import MoneyMaker, AbstractMoney
# The default Money type for this shop
Money = MoneyMaker()
| 28.777778 | 114 | 0.764479 | [
"BSD-3-Clause"
] | infolabs/django-edw-shop | backend/edw_shop/money/__init__.py | 259 | Python |
# -*- coding: utf-8 -*-
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# imports ######################################################################
import rsa
# participants #################################################################
(sender_pub_key, sender_priv_key) = rsa.newkeys(1024, poolsize=4)
(receiver_pub_key, receiver_priv_key) = rsa.newkeys(1024, poolsize=4)
# side of sender ###############################################################
message = 'Hello'
print('Message:', message)
bin_message = message.encode()
print('Bytes message:', bin_message)
signature = rsa.sign(bin_message, sender_priv_key, 'SHA-512')
print('Signature:', signature)
crypto = rsa.encrypt(bin_message, receiver_pub_key)
print('Encrypted message:', crypto)
# side of receiver
bin_decrypted = rsa.decrypt(crypto, receiver_priv_key)
print('Decrypted bytes message:', bin_decrypted)
if rsa.verify(bin_decrypted, signature, sender_pub_key):
print('Message signature verified.')
else:
print('SIGNATURE VERIFICATION ERROR!')
decrypted = bin_decrypted.decode()
print('Decrypted message:', decrypted)
# END ##########################################################################
| 29.347826 | 80 | 0.591111 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | sunarch/libmonty | other/tests/rsa-example.py | 1,350 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ._configuration import PolicyClientConfiguration
from .operations import PolicyAssignmentsOperations, PolicyDefinitionsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PolicyClient:
"""To manage and control access to your resources, you can define customized policies and assign
them at a scope.
:ivar policy_assignments: PolicyAssignmentsOperations operations
:vartype policy_assignments:
azure.mgmt.resource.policy.v2016_04_01.aio.operations.PolicyAssignmentsOperations
:ivar policy_definitions: PolicyDefinitionsOperations operations
:vartype policy_definitions:
azure.mgmt.resource.policy.v2016_04_01.aio.operations.PolicyDefinitionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2016-04-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = PolicyClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.policy_assignments = PolicyAssignmentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.policy_definitions = PolicyDefinitionsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "PolicyClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| 44.494949 | 125 | 0.706016 | [
"MIT"
] | AikoBB/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2016_04_01/aio/_policy_client.py | 4,405 | Python |
from typing import Tuple, Union
from discord import Embed, Member, PermissionOverwrite, TextChannel, VoiceChannel, VoiceState
from discord.ext.commands import bot_has_guild_permissions
from discord_slash import (
Button,
ComponentContext,
Modal,
ModalContext,
Select,
SelectOption,
SlashCommandOptionType,
SlashContext,
TextInput,
TextInputStyle,
)
from discord_slash.cog_ext import cog_subcommand as slash_subcommand
from discord_slash.utils.manage_commands import create_option
from utils import (
AsteroidBot,
Cog,
DiscordColors,
DontHavePrivateRoom,
GuildData,
GuildPrivateVoice,
bot_owner_or_permissions,
cog_is_enabled,
get_content,
is_enabled,
)
class PrivateRooms(Cog):
def __init__(self, bot: AsteroidBot) -> None:
self.bot = bot
self.emoji = "🔊"
self.name = "PrivateRooms"
async def __check(
self, ctx: SlashContext, *, return_guild_data: bool = False
) -> Union[Tuple[VoiceChannel, dict], Tuple[VoiceChannel, dict, GuildData]]:
guild_data = await self.bot.get_guild_data(ctx.guild_id)
active_channels = guild_data.private_voice.active_channels
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if str(ctx.author_id) not in active_channels:
raise DontHavePrivateRoom
voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
if return_guild_data:
return voice_channel, content, guild_data
return voice_channel, content
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="close",
description="Closes your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_close(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, connect=False)
await ctx.send(content["ROOM_CLOSED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="open",
description="Opens your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_open(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, connect=True)
await ctx.send(content["ROOM_OPENED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="hide",
description="Hides your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_hide(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=False)
await ctx.send(content["ROOM_HIDED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="unhide",
description="Unhides your room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_unhide(self, ctx: SlashContext):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=True)
await ctx.send(content["ROOM_UNHIDED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="change_name",
description="Change room name",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_change__name(self, ctx: SlashContext, name: str):
voice_channel, content = await self.__check(ctx)
await voice_channel.edit(name=name)
await ctx.send(content["ROOM_NAME_WAS_SETUP"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="ban",
description="Bans member to room",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
async def private__rooms_control_ban(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(member, connect=False)
if member.voice and member.voice.channel.id == voice_channel.id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_BANNED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="unban",
description="Unbans member from room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_unban(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(member, connect=True)
await ctx.send(content["MEMBER_WAS_UNBANNED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="kick",
description="Kicks a member from room",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
async def private__rooms_control_kick(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
if member.voice and member.voice.channel.id == voice_channel.id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_KICKED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="transfer_ownership",
description="Transfer room ownership",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_transfer__ownership(self, ctx: SlashContext, member: Member):
voice_channel, content, guild_data = await self.__check(ctx, return_guild_data=True)
await guild_data.private_voice.set_private_voice_channel(member.id, voice_channel.id)
await voice_channel.set_permissions(
member, manage_channels=True, connect=True, move_members=True
)
await voice_channel.set_permissions(
ctx.author, manage_channels=False, connect=False, move_members=False
)
await ctx.send(content["OWNERSHIP_TRANSFERED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="set_limit",
description="Sets room limit",
options=[
create_option(
name="limit",
description="The limit of members in your room",
option_type=SlashCommandOptionType.INTEGER,
min_value=1,
max_value=99,
)
],
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_set__limit(self, ctx: SlashContext, limit: int):
voice_channel, content = await self.__check(ctx)
await voice_channel.edit(user_limit=limit)
await ctx.send(content["LIMIT_WAS_SETUP"], hidden=True)
@slash_subcommand(
base="private_rooms",
name="create_menu",
description="Creates a control menu",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
@bot_owner_or_permissions(manage_guild=True)
async def private__rooms_create__menu(self, ctx: SlashContext):
await ctx.defer(hidden=True)
guild_data = await self.bot.get_guild_data(ctx.guild_id)
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
components = [
[
Button(emoji=self.bot.get_emoji(959124362840113182), custom_id="voice_close"),
Button(emoji=self.bot.get_emoji(959124362890461254), custom_id="voice_open"),
Button(emoji=self.bot.get_emoji(959124362890461325), custom_id="voice_hide"),
Button(emoji=self.bot.get_emoji(959124362890473582), custom_id="voice_unhide"),
Button(
emoji=self.bot.get_emoji(959124362798174319), custom_id="voice_change_room_name"
),
],
[
Button(emoji=self.bot.get_emoji(959124362882068550), custom_id="voice_ban"),
Button(emoji=self.bot.get_emoji(959124362835931236), custom_id="voice_unban"),
Button(emoji=self.bot.get_emoji(959124362974343169), custom_id="voice_kick"),
Button(emoji=self.bot.get_emoji(959124362823340052), custom_id="voice_transfer"),
Button(
emoji=self.bot.get_emoji(959124362835927080), custom_id="voice_set_room_limit"
),
],
]
category = await ctx.guild.create_category(content["PRIVATE_ROOMS"])
voice_channel = await category.create_voice_channel(content["CREATE_ROOM"])
overwrites = {
ctx.guild.default_role: PermissionOverwrite(
send_messages=False, use_slash_commands=False
)
}
text_channel: TextChannel = await category.create_text_channel(
content["ROOM_CONTROL"], overwrites=overwrites
)
await guild_data.create_private_voice(text_channel.id, voice_channel.id)
embed = Embed(
title=content["ROOM_CONTROL_TITLE"],
description="".join(content["ROOM_CONTROL_DESCRIPTION"]),
color=DiscordColors.EMBED_COLOR,
)
await text_channel.send(embed=embed, components=components)
await ctx.send(content["SUCCESSFULLY_CREATED"])
@Cog.listener()
@cog_is_enabled()
async def on_voice_state_update(self, member: Member, before: VoiceState, after: VoiceState):
guild_data = await self.bot.get_guild_data(member.guild.id)
private_voice = guild_data.private_voice
voice_channel_id = private_voice.voice_channel_id
if after.channel and after.channel.id == voice_channel_id:
if before.channel:
await self._check_channel(member, before, private_voice)
# Creating a private voice channel
overwrites = {
member.guild.default_role: PermissionOverwrite(connect=False),
member: PermissionOverwrite(manage_channels=True, connect=True, move_members=True),
}
channel: VoiceChannel = await after.channel.category.create_voice_channel(
f"{member.display_name}'s channel", overwrites=overwrites
)
await member.move_to(channel)
await private_voice.set_private_voice_channel(member.id, channel.id)
return
if before.channel:
await self._check_channel(member, before, private_voice)
async def _check_channel(
self, member: Member, before: VoiceState, private_voice: GuildPrivateVoice
):
if not (channel_id := private_voice.active_channels.get(str(member.id))):
return
if before.channel.id != channel_id:
return
if not before.channel.members:
await before.channel.delete()
await private_voice.delete_private_voice_channel(member.id)
return
first_member = before.channel.members[0]
await private_voice.set_private_voice_channel(first_member.id, before.channel.id)
await before.channel.set_permissions(
member, manage_channels=False, connect=False, move_members=False
)
await before.channel.set_permissions(
first_member, manage_channels=True, connect=True, move_members=True
)
@Cog.listener()
@cog_is_enabled()
async def on_button_click(self, ctx: ComponentContext):
if not ctx.custom_id.startswith("voice"):
return
guild_data = await self.bot.get_guild_data(ctx.guild_id)
active_channels = guild_data.private_voice.active_channels
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if str(ctx.author_id) not in active_channels:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
match ctx.custom_id:
case "voice_close":
await voice_channel.set_permissions(ctx.guild.default_role, connect=False)
await ctx.send(content["ROOM_CLOSED"], hidden=True)
case "voice_open":
await voice_channel.set_permissions(ctx.guild.default_role, connect=True)
await ctx.send(content["ROOM_OPENED"], hidden=True)
case "voice_hide":
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=False)
await ctx.send(content["ROOM_HIDED"], hidden=True)
case "voice_unhide":
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=True)
await ctx.send(content["ROOM_UNHIDED"], hidden=True)
case "voice_change_room_name":
modal = Modal(
custom_id="voice_modal_change_room_name",
title=content["PRIVATE_ROOM_CONTROL_MODAL"],
components=[
TextInput(
custom_id="channel_name",
label=content["ROOM_NAME"],
style=TextInputStyle.SHORT,
)
],
)
await ctx.popup(modal)
case "voice_ban" | "voice_unban" | "voice_kick" | "voice_transfer":
modal = Modal(
custom_id=f"voice_modal_{ctx.custom_id.replace('voice', '')}",
title=content["PRIVATE_ROOM_CONTROL_MODAL"],
components=[
TextInput(
custom_id="user_id",
label=content["MEMBER_ID"],
style=TextInputStyle.SHORT,
)
],
)
await ctx.popup(modal)
case "voice_set_room_limit":
select = Select(
custom_id="voice_select_set_room_limit",
options=[
SelectOption(label=content["REMOVE_LIMIT"], value=0),
SelectOption(label="2", value=2),
SelectOption(label="3", value=3),
SelectOption(label="4", value=4),
SelectOption(label="5", value=5),
SelectOption(label="10", value=10),
],
)
await ctx.send(content["SETUP_ROOM_LIMIT"], components=[select], hidden=True)
@Cog.listener()
@cog_is_enabled()
async def on_select_option(self, ctx: ComponentContext):
if not ctx.custom_id.startswith("voice"):
return
guild_data = await self.bot.get_guild_data(ctx.guild_id)
active_channels = guild_data.private_voice.active_channels
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if str(ctx.author_id) not in active_channels:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
await voice_channel.edit(user_limit=ctx.values[0])
await ctx.send(content["LIMIT_WAS_SETUP"], hidden=True)
@Cog.listener(name="on_modal")
@cog_is_enabled()
async def on_voice_modal(self, ctx: ModalContext):
if not ctx.custom_id.startswith("voice"):
return
await ctx.defer(hidden=True)
guild_data = await self.bot.get_guild_data(ctx.guild_id)
voice_channel_id = guild_data.private_voice.active_channels.get(str(ctx.author_id))
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if voice_channel_id is None:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(voice_channel_id)
if channel_name := ctx.values.get("channel_name"):
await voice_channel.edit(name=channel_name)
return await ctx.send(content["ROOM_NAME_WAS_SETUP"], hidden=True)
user_id: str = ctx.values["user_id"]
if not user_id.isdigit():
return await ctx.send(content["NOT_ID"], hidden=True)
member: Member = ctx.guild.get_member(int(user_id))
if member is None:
return await ctx.send(content["NOT_MEMBER_ID"], hidden=True)
match ctx.custom_id:
case "voice_modal_ban":
await voice_channel.set_permissions(member, connect=False)
if member.voice and member.voice.channel.id == voice_channel_id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_BANNED"], hidden=True)
case "voice_modal_unban":
await voice_channel.set_permissions(member, connect=True)
await ctx.send(content["MEMBER_WAS_UNBANNED"], hidden=True)
case "voice_modal_kick":
if member.voice and member.voice.channel.id == voice_channel_id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_KICKED"], hidden=True)
case "voice_modal_transfer":
await guild_data.private_voice.set_private_voice_channel(user_id, voice_channel_id)
await voice_channel.set_permissions(
member, manage_channels=True, connect=True, move_members=True
)
await voice_channel.set_permissions(
ctx.author, manage_channels=False, connect=False, move_members=False
)
await ctx.send(content["OWNERSHIP_TRANSFERED"], hidden=True)
def setup(bot):
bot.add_cog(PrivateRooms(bot))
| 41.224944 | 100 | 0.64376 | [
"MIT"
] | Damego/Asteroid-Discord-Bot | source/cogs/private_rooms.py | 18,513 | Python |
from flask import request
from flask_restplus import Resource
from app.project.auth import auth
from app.project.auth.auth_service import AuthService
from app.project.user.user_dto import UserDto
from app.project.user.user_service import UserService
api = UserDto.api
_user = UserDto.user
@api.route('/')
class UserList(Resource):
@api.doc('list_of_registered_users')
@api.marshal_list_with(_user, envelope='data')
def get(self):
"""List all registered users"""
return UserService.get_all_users()
@auth.login_required
@AuthService.admin_permission_required
@api.response(201, 'User successfully created.')
@api.doc('create a new user(only for admin)')
@api.expect(_user, validate=True)
def post(self):
"""Creates a new User(only for admin) """
user_service = UserService()
return user_service.create_user(request.json)
@api.route('/<public_id>')
@api.param('public_id', 'The User identifier')
@api.response(404, 'User not found.')
class User(Resource):
@api.doc('get a user')
@api.marshal_with(_user)
def get(self, public_id):
"""get a user given its identifier"""
user_service = UserService()
user_service.load_user(public_id)
if user_service.is_nan_user():
api.abort(404)
else:
return user_service.get_user_public()
| 29.934783 | 53 | 0.690632 | [
"MIT"
] | makci97/lms_flask | app/project/user/user_controller.py | 1,377 | Python |
from django.core.management.base import BaseCommand
# Third-Party
import requests
class Command(BaseCommand):
help = "Command to upload from dropbox."
def add_arguments(self, parser):
parser.add_argument(
'dropbox',
nargs='?',
)
def handle(self, *args, **options):
# Parse URL input
dropbox = options['dropbox']
p1 = dropbox.partition('?')
p2 = p1[0].rpartition('/')
filename = p2[2]
url = dropbox.replace("?dl=0", "?dl=1")
# open in binary mode
with open(filename, "wb") as file:
# get request
response = requests.get(url)
# write to file
file.write(response.content)
self.stdout.write("Uploaded {0}".format(filename))
return
| 26.193548 | 58 | 0.559113 | [
"BSD-2-Clause"
] | barberscore/archive-api | project/apps/core/management/commands/upload_file.py | 812 | Python |
import setuptools
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name='amazonstoreprice',
packages=['amazonstoreprice'],
package_dir={'amazonestoreprice': 'amazonstoreprice'},
version='0.1.7',
install_requires=['requests', 'beautifulsoup4'],
description='Find the price on Amazon store starting from url',
long_description=long_description,
long_description_content_type='text/markdown',
author='Alessandro Sbarbati',
author_email='[email protected]',
url='https://github.com/Mirio/amazonstoreprice',
download_url='https://github.com/Mirio/amazonstoreprice/tarball/0.1',
keywords=['Amazon', 'amazonprice', 'amazonstoreprice', "amazonstore"],
license='BSD',
classifiers=["License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities"],
)
| 39.464286 | 80 | 0.685973 | [
"BSD-2-Clause"
] | Mirio/amazonstore-price | setup.py | 1,105 | Python |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Unitary gate."""
from warnings import warn
from typing import List, Optional, Union, Tuple
import numpy as np
from scipy.linalg import schur
from qiskit.circuit.parameter import ParameterExpression
from qiskit.circuit.exceptions import CircuitError
from .instruction import Instruction
class Gate(Instruction):
"""Unitary gate."""
def __init__(self, name: str, num_qubits: int, params: List,
label: Optional[str] = None) -> None:
"""Create a new gate.
Args:
name: The Qobj name of the gate.
num_qubits: The number of qubits the gate acts on.
params: A list of parameters.
label: An optional label for the gate.
"""
self._label = label
self.definition = None
super().__init__(name, num_qubits, 0, params)
# Set higher priority than Numpy array and matrix classes
__array_priority__ = 20
def to_matrix(self) -> np.ndarray:
"""Return a Numpy.array for the gate unitary matrix.
Returns:
np.ndarray: if the Gate subclass has a matrix definition.
Raises:
CircuitError: If a Gate subclass does not implement this method an
exception will be raised when this base class method is called.
"""
if hasattr(self, '__array__'):
# pylint: disable=no-member
return self.__array__(dtype=complex)
raise CircuitError("to_matrix not defined for this {}".format(type(self)))
def power(self, exponent: float):
"""Creates a unitary gate as `gate^exponent`.
Args:
exponent (float): Gate^exponent
Returns:
qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent.
Raises:
CircuitError: If Gate is not unitary
"""
from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import
from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import
# Should be diagonalized because it's a unitary.
decomposition, unitary = schur(Operator(self).data, output='complex')
# Raise the diagonal entries to the specified power
decomposition_power = list()
decomposition_diagonal = decomposition.diagonal()
# assert off-diagonal are 0
if not np.allclose(np.diag(decomposition_diagonal), decomposition):
raise CircuitError('The matrix is not diagonal')
for element in decomposition_diagonal:
decomposition_power.append(pow(element, exponent))
# Then reconstruct the resulting gate.
unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T
return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent))
def _return_repeat(self, exponent: float) -> 'Gate':
return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits,
params=self.params)
def assemble(self) -> 'Instruction':
"""Assemble a QasmQobjInstruction"""
instruction = super().assemble()
if self.label:
instruction.label = self.label
return instruction
@property
def label(self) -> str:
"""Return gate label"""
return self._label
@label.setter
def label(self, name: str):
"""Set gate label to name
Args:
name (str or None): label to assign unitary
Raises:
TypeError: name is not string or None.
"""
if isinstance(name, (str, type(None))):
self._label = name
else:
raise TypeError('label expects a string or None')
def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None,
ctrl_state: Optional[Union[int, str]] = None):
"""Return controlled version of gate. See :class:`.ControlledGate` for usage.
Args:
num_ctrl_qubits: number of controls to add to gate (default=1)
label: optional gate label
ctrl_state: The control state in decimal or as a bitstring
(e.g. '111'). If None, use 2**num_ctrl_qubits-1.
Returns:
qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm
uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size
num_qubits + 2*num_ctrl_qubits - 1.
Raises:
QiskitError: unrecognized mode or invalid ctrl_state
"""
# pylint: disable=cyclic-import
from .add_control import add_control
return add_control(self, num_ctrl_qubits, label, ctrl_state)
@staticmethod
def _broadcast_single_argument(qarg: List) -> List:
"""Expands a single argument.
For example: [q[0], q[1]] -> [q[0]], [q[1]]
"""
# [q[0], q[1]] -> [q[0]]
# -> [q[1]]
for arg0 in qarg:
yield [arg0], []
@staticmethod
def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List:
if len(qarg0) == len(qarg1):
# [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]]
# -> [q[1], r[1]]
for arg0, arg1 in zip(qarg0, qarg1):
yield [arg0, arg1], []
elif len(qarg0) == 1:
# [[q[0]], [r[0], r[1]]] -> [q[0], r[0]]
# -> [q[0], r[1]]
for arg1 in qarg1:
yield [qarg0[0], arg1], []
elif len(qarg1) == 1:
# [[q[0], q[1]], [r[0]]] -> [q[0], r[0]]
# -> [q[1], r[0]]
for arg0 in qarg0:
yield [arg0, qarg1[0]], []
else:
raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' %
(qarg0, qarg1))
@staticmethod
def _broadcast_3_or_more_args(qargs: List) -> List:
if all(len(qarg) == len(qargs[0]) for qarg in qargs):
for arg in zip(*qargs):
yield list(arg), []
else:
raise CircuitError(
'Not sure how to combine these qubit arguments:\n %s\n' % qargs)
def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]:
"""Validation and handling of the arguments and its relationship.
For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This
method yields the arguments in the right grouping. In the given example::
in: [[q[0],q[1]], q[2]],[]
outs: [q[0], q[2]], []
[q[1], q[2]], []
The general broadcasting rules are:
* If len(qargs) == 1::
[q[0], q[1]] -> [q[0]],[q[1]]
* If len(qargs) == 2::
[[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]
[[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]]
[[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]]
* If len(qargs) >= 3::
[q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]
Args:
qargs: List of quantum bit arguments.
cargs: List of classical bit arguments.
Returns:
A tuple with single arguments.
Raises:
CircuitError: If the input is not valid. For example, the number of
arguments does not match the gate expectation.
"""
if len(qargs) != self.num_qubits or cargs:
raise CircuitError(
f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does'
f' not match the gate expectation ({self.num_qubits}).')
if any(not qarg for qarg in qargs):
raise CircuitError('One or more of the arguments are empty')
if len(qargs) == 1:
return Gate._broadcast_single_argument(qargs[0])
elif len(qargs) == 2:
return Gate._broadcast_2_arguments(qargs[0], qargs[1])
elif len(qargs) >= 3:
return Gate._broadcast_3_or_more_args(qargs)
else:
raise CircuitError('This gate cannot handle %i arguments' % len(qargs))
def validate_parameter(self, parameter):
"""Gate parameters should be int, float, or ParameterExpression"""
if isinstance(parameter, ParameterExpression):
if len(parameter.parameters) > 0:
return parameter # expression has free parameters, we cannot validate it
if not parameter._symbol_expr.is_real:
raise CircuitError("Bound parameter expression is complex in gate {}".format(
self.name))
return parameter # per default assume parameters must be real when bound
if isinstance(parameter, (int, float)):
return parameter
elif isinstance(parameter, (np.integer, np.floating)):
return parameter.item()
elif isinstance(parameter, np.ndarray):
warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed "
"no earlier than 3 months after that release date. "
"Considering creating your own Gate subclass with the method validate_parameter "
" to allow this param type." % type(parameter), DeprecationWarning, 3)
return parameter
else:
raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter),
self.name))
| 39.015385 | 98 | 0.568119 | [
"Apache-2.0"
] | Blacksmith-qi/qiskit-terra | qiskit/circuit/gate.py | 10,144 | Python |
import numpy as np
def make_exp_kernel(L0):
def exp_kernel(x1, x2):
x1 = np.expand_dims(x1, 2) # Append a singleton dimension
x2 = x2.T
return np.exp(-np.mean(np.abs(x1 - x2), axis=1) / L0)
return exp_kernel
def make_sq_exp_kernel(L0):
def sq_exp_kernel(x1, x2):
x1 = np.expand_dims(x1, 2) # Append a singleton dimension
x2 = x2.T
return np.exp(-np.sum((x1 - x2)**2, axis=1) / (2 * L0**2))
return sq_exp_kernel
def weighted_neighbors_loss(train_data, valid_data, kernel):
"""Computes the negative log prob per data point."""
X_train, T_train = train_data
X_valid, T_valid = valid_data
weight_mat = kernel(X_valid, X_train)
label_probs = np.dot(weight_mat, T_train)
label_probs = label_probs / np.sum(label_probs, axis=1, keepdims=True)
mean_neg_log_prob = - np.mean(np.log(np.sum(label_probs * T_valid,
axis=1)), axis=0)
return mean_neg_log_prob
| 36.703704 | 74 | 0.634712 | [
"MIT"
] | LinZichuan/drmad | cpu_ver/hypergrad/kernel_methods.py | 991 | Python |
from chalice.app import Chalice
from chalice.app import (
ChaliceViewError, BadRequestError, UnauthorizedError, ForbiddenError,
NotFoundError, ConflictError, TooManyRequestsError
)
__version__ = '0.2.0'
| 23.666667 | 73 | 0.793427 | [
"Apache-2.0"
] | yzjamm/awstest | chalice/__init__.py | 213 | Python |
import os
import sys
import torch
import numpy as np
import datetime
import logging
import provider
import importlib
import shutil
import argparse
from pathlib import Path
from tqdm import tqdm
from data_utils.PCDLoader import *
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))
def parse_args():
'''PARAMETERS'''
parser = argparse.ArgumentParser('training')
parser.add_argument('--use_cpu', action='store_true', default=False, help='use cpu mode')
parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
parser.add_argument('--batch_size', type=int, default=8, help='batch size in training')
parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]')
parser.add_argument('--num_category', default=12, type=int, help='training on real dataset')
parser.add_argument('--epoch', default=20, type=int, help='number of epoch in training')
parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number')
parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training')
parser.add_argument('--log_dir', type=str, default=None, help='experiment root')
parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate')
parser.add_argument('--use_normals', action='store_true', default=False, help='use normals')
parser.add_argument('--process_data', action='store_true', default=False, help='save data offline')
parser.add_argument('--use_uniform_sample', action='store_true', default=False, help='use uniform sampiling')
parser.add_argument('--num_sparse_point', type=int, default=50, help='Point Number for domain loss')
parser.add_argument('--random_choose_sparse', type=bool, default=False, help='Random select num_sparse_point from [10,20,30,40,50]')
parser.add_argument('--SO3_Rotation', action='store_true', default=False, help='arbitrary rotation in SO3')
parser.add_argument('--DA_method', type=str, default="multi_coral_mmd", help='choose the DA loss function')
parser.add_argument('--alpha', type=float, default=10, help='set the value of classification loss')
parser.add_argument('--lamda', type=float, default=10, help='set the value of CORAL loss')
parser.add_argument('--beta', type=float, default=10, help='set the value of MMD loss')
return parser.parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def inplace_relu(m):
classname = m.__class__.__name__
if classname.find('ReLU') != -1:
m.inplace=True
def test(model, loader, num_class=12):
mean_correct = []
class_acc = np.zeros((num_class, 3))
classifier = model.eval()
for j, data in tqdm(enumerate(loader), total=len(loader)):
if not args.use_cpu:
points, target = data['pointcloud'].to(device).float(), data['category'].to(device)
points = points.transpose(2, 1)
pred, _ = classifier(points)
pred_choice = pred.data.max(1)[1]
for cat in np.unique(target.cpu()):
classacc = pred_choice[target == cat].eq(target[target == cat].long().data).cpu().sum()
class_acc[cat, 0] += classacc.item() / float(points[target == cat].size()[0])
class_acc[cat, 1] += 1
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
class_acc[:, 2] = class_acc[:, 0] / class_acc[:, 1]
class_acc = np.mean(class_acc[:, 2])
instance_acc = np.mean(mean_correct)
return instance_acc, class_acc
def main(args):
def log_string(str):
logger.info(str)
print(str)
'''HYPER PARAMETER'''
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
'''CREATE DIR'''
timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
exp_dir = Path('./log/')
exp_dir.mkdir(exist_ok=True)
exp_dir = exp_dir.joinpath('classification')
exp_dir.mkdir(exist_ok=True)
if args.log_dir is None:
exp_dir = exp_dir.joinpath(timestr)
else:
exp_dir = exp_dir.joinpath(args.log_dir)
exp_dir.mkdir(exist_ok=True)
checkpoints_dir = exp_dir.joinpath('checkpoints/')
checkpoints_dir.mkdir(exist_ok=True)
log_dir = exp_dir.joinpath('logs/')
log_dir.mkdir(exist_ok=True)
'''LOG'''
args = parse_args()
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
log_string('PARAMETER ...')
log_string(args)
'''DATA LOADING'''
log_string('Load dataset ...')
visual_data_path = 'data/visual_data_pcd/'
tactile_data_path = 'data/tactile_pcd_10_sampled_21.02/'
train_dataset = PCDPointCloudData(visual_data_path,
folder='Train',
sample_method='Voxel',
num_point=args.num_point,
sample=True,
rotation=False,
est_normal=args.use_normals)
test_dataset = PCDPointCloudData(visual_data_path,
folder='Test',
sample_method='Voxel',
num_point=args.num_point,
sample=True,
rotation=False,
est_normal=args.use_normals)
if args.random_choose_sparse is True:
raise NotImplementedError("Function Not Implemented") # Not Implement
# domain_adaptation_dataset = PCDPointCloudData(tactile_data_path, folder='Train',
# random_num=True,
# list_num_point=[10,20,30,40,50])
else:
domain_adaptation_dataset = PCDPointCloudData(tactile_data_path,
folder='Train',
sample_method='Voxel',
num_point=args.num_sparse_point,
sample=True,
rotation=False,
est_normal=args.use_normals)
trainDataLoader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)
domainAdaptationDataLoader = torch.utils.data.DataLoader(domain_adaptation_dataset, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)
testDataLoader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=10)
'''Output middle layers'''
activation = {}
def get_activation(name):
def hook(model, input, output):
            activation[name] = output.detach()
return hook
'''MODEL LOADING'''
num_class = args.num_category
model = importlib.import_module(args.model)
shutil.copy('./models/%s.py' % args.model, str(exp_dir))
shutil.copy('models/pointnet_cls.py', str(exp_dir))
shutil.copy('data_utils/PCDLoader.py', str(exp_dir))
shutil.copy('./train_realMulti-DA-Loss_classification.py', str(exp_dir))
classifier = model.get_model(num_class, normal_channel=args.use_normals)
criterion = model.get_loss()
if args.DA_method == "coral":
criterion_DA = model.get_coral_loss(DA_alpha=args.alpha, DA_lamda=args.lamda)
elif args.DA_method == "mmd":
criterion_DA = model.get_mmd_loss(DA_alpha=args.alpha, DA_lamda=args.lamda)
elif args.DA_method == "coral_mmd":
criterion_DA = model.get_coral_mmd_loss(DA_alpha=args.alpha, DA_beta=args.beta,
DA_lamda=args.lamda)
elif args.DA_method == "multi_coral_mmd":
criterion_DA = model.get_multiLayer_loss(DA_alpha=args.alpha, DA_beta=args.beta,
DA_lamda=args.lamda)
else:
raise NameError("Wrong input for DA method name!")
classifier.apply(inplace_relu)
if not args.use_cpu:
classifier = classifier.cuda()
criterion = criterion.cuda()
criterion_DA = criterion_DA.cuda()
# Load pretrained model with real dataset
try:
checkpoint = torch.load(str(exp_dir) + '/checkpoints/best_model.pth')
start_epoch = checkpoint['epoch']
classifier.load_state_dict(checkpoint['model_state_dict'])
log_string('Use pretrain model')
except:
log_string('No existing model, starting training from scratch...')
start_epoch = 0
try:
min_loss = checkpoint['loss']
log_string('Loading model with DA loss %f' % min_loss)
except:
log_string('No DA loss found in the model')
min_loss = 10000.0
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(
classifier.parameters(),
lr=args.learning_rate,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=args.decay_rate
)
else:
optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
global_epoch = 0
global_step = 0
best_instance_acc = 0.0
best_class_acc = 0.0
running_loss = 0.0
'''TRANING'''
logger.info('Start training...')
end_epoch = start_epoch + args.epoch
print("start epoch: ", start_epoch)
print("end epoch: ", end_epoch)
for epoch in range(start_epoch, end_epoch):
log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, end_epoch))
mean_correct = []
# Freeze Conv
for name, param in classifier.named_parameters():
if "feat" in name:
param.requires_grad = False
scheduler.step()
for batch_id, (data, data_DA) in tqdm(
enumerate(zip(trainDataLoader,domainAdaptationDataLoader), 0),
total=len(trainDataLoader),
smoothing=0.9):
optimizer.zero_grad()
points, target = data['pointcloud'].to(device).float(), data['category'].to(device)
points_DA = data_DA['pointcloud'].to(device).float()
points = points.data.cpu().numpy()
points = provider.random_point_dropout(points)
points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
points = torch.Tensor(points)
points = points.transpose(2, 1)
points_DA = points_DA.data.cpu().numpy()
points_DA = provider.random_point_dropout(points_DA)
points_DA[:, :, 0:3] = provider.random_scale_point_cloud(points_DA[:, :, 0:3])
points_DA[:, :, 0:3] = provider.shift_point_cloud(points_DA[:, :, 0:3])
points_DA = torch.Tensor(points_DA)
points_DA = points_DA.transpose(2, 1)
if not args.use_cpu:
points, target = points.cuda(), target.cuda()
points_DA = points_DA.cuda()
pred, trans_feat = classifier(points)
# Multi-layer Loss
###############################################################################################
# FC1
classifier.fc1.register_forward_hook(get_activation('fc1'))
output_dense_1 = classifier(points)
feature_dense_1 = activation['fc1']
# print(feature_dense_1.size())
classifier.fc1.register_forward_hook(get_activation('fc1'))
output_DA_1 = classifier(points_DA)
feature_DA_1 = activation['fc1']
# print(feature_DA_1.size())
# FC2
classifier.fc2.register_forward_hook(get_activation('fc2'))
output_dense_2 = classifier(points)
feature_dense_2 = activation['fc2']
# print(feature_dense_2.size())
classifier.fc2.register_forward_hook(get_activation('fc2'))
output_DA_2 = classifier(points_DA)
feature_DA_2 = activation['fc2']
# print(feature_DA_2.size())
# change the loss here for testing!!!
DA_loss, loss = criterion_DA(pred, target.long(), trans_feat,
feature_dense_1, feature_DA_1, feature_dense_2, feature_DA_2)
################################################################################################
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
loss.backward()
optimizer.step()
global_step += 1
# Print the loss
running_loss += DA_loss.item()
if batch_id % 100 == 99:
# log_string("fc1 {}".format(classifier.fc1.weight.grad))
# log_string("fc2 {}".format(classifier.fc2.weight.grad))
# log_string("fc3 {}".format(classifier.fc3.weight.grad))
# print("Training loss {} ".format(loss.item()/100))
calculate_loss = running_loss/100
log_string("Running DA loss {} ".format(calculate_loss))
if calculate_loss < min_loss:
min_loss = calculate_loss
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/best_model.pth'
log_string('Saving at %s' % savepath)
state = {
'epoch': epoch,
# 'instance_acc': instance_acc,
# 'class_acc': class_acc,
'loss': calculate_loss,
'model_state_dict': classifier.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
running_loss = 0.0
train_instance_acc = np.mean(mean_correct)
log_string('Train Instance Accuracy: %f' % train_instance_acc)
with torch.no_grad():
instance_acc, class_acc = test(classifier.eval(), testDataLoader, num_class=num_class)
if (instance_acc >= best_instance_acc):
best_instance_acc = instance_acc
best_epoch = epoch + 1
if (class_acc >= best_class_acc):
best_class_acc = class_acc
log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc))
log_string('Best Instance Accuracy: %f, Class Accuracy: %f' % (best_instance_acc, best_class_acc))
if (instance_acc >= best_instance_acc):
# logger.info('Save model...')
# # print("This is a better model, but the model will not be saved")
logger.info('Model will not be saved with vision validation')
# savepath = str(checkpoints_dir) + '/best_model.pth'
# log_string('Saving at %s' % savepath)
# state = {
# 'epoch': best_epoch,
# 'instance_acc': instance_acc,
# 'class_acc': class_acc,
# 'model_state_dict': classifier.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# }
# torch.save(state, savepath)
global_epoch += 1
logger.info('End of training...')
if __name__ == '__main__':
# torch.cuda.empty_cache()
args = parse_args()
main(args)
| 42.536458 | 161 | 0.588772 | [
"MIT"
] | congw112358/Pointnet_Pointnet2_pytorch | train_realMulti-DA-Loss_classification.py | 16,334 | Python |
# Copyright 2020 Determined AI. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pathlib
from typing import List, Optional
import yogadl
class LMDBDataRef(yogadl.DataRef):
def __init__(self, cache_filepath: pathlib.Path):
self._lmdb_access = yogadl.LmdbAccess(lmdb_path=cache_filepath)
self._keys = self._lmdb_access.get_keys()
def stream(
self,
start_offset: int = 0,
shuffle: bool = False,
skip_shuffle_at_epoch_end: bool = False,
shuffle_seed: Optional[int] = None,
shard_rank: int = 0,
num_shards: int = 1,
drop_shard_remainder: bool = False,
) -> yogadl.Stream:
"""
Create a stream from a cache.
"""
if shuffle and not skip_shuffle_at_epoch_end:
assert shuffle_seed is not None, (
"Please set `shuffle_seed` if enabling `shuffle` and not enabling "
"`skip_shuffle_at_epoch_end`."
)
generated_keys = self._shard_keys(
shard_rank=shard_rank,
num_shards=num_shards,
drop_shard_remainder=drop_shard_remainder,
)
generator_from_keys = yogadl.GeneratorFromKeys(
keys=generated_keys,
initial_offset=start_offset,
read_val_from_key_fn=self._lmdb_access.read_value_by_key,
shuffle_at_start=shuffle,
shuffle_after_epoch=shuffle and not skip_shuffle_at_epoch_end,
shuffle_seed=shuffle_seed,
)
return yogadl.Stream(
iterator_fn=generator_from_keys.instantiate_generator,
length=len(generated_keys),
output_types=self._lmdb_access.get_types(),
output_shapes=self._lmdb_access.get_shapes(),
)
def __len__(self) -> int:
return len(self._keys)
def _shard_keys(
self, shard_rank: int, num_shards: int, drop_shard_remainder: bool
) -> List[bytes]:
generated_keys = yogadl.shard_keys(
keys=self._keys,
shard_index=shard_rank,
num_shards=num_shards,
sequential=False,
drop_shard_remainder=drop_shard_remainder,
)
return generated_keys
| 34.536585 | 83 | 0.635946 | [
"Apache-2.0"
] | determined-ai/yogadl | yogadl/dataref/_local_lmdb_dataref.py | 2,832 | Python |
# coding: utf-8
import pprint
import re # noqa: F401
import six
class UserNotificationSubject(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'str',
'url': 'str',
'latest_comment_url': 'str',
'type': 'str'
}
attribute_map = {
'title': 'title',
'url': 'url',
'latest_comment_url': 'latest_comment_url',
'type': 'type'
}
def __init__(self, title=None, url=None, latest_comment_url=None, type=None): # noqa: E501
"""UserNotificationSubject - a model defined in Swagger""" # noqa: E501
self._title = None
self._url = None
self._latest_comment_url = None
self._type = None
self.discriminator = None
if title is not None:
self.title = title
if url is not None:
self.url = url
if latest_comment_url is not None:
self.latest_comment_url = latest_comment_url
if type is not None:
self.type = type
@property
def title(self):
"""Gets the title of this UserNotificationSubject. # noqa: E501
:return: The title of this UserNotificationSubject. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this UserNotificationSubject.
:param title: The title of this UserNotificationSubject. # noqa: E501
:type: str
"""
self._title = title
@property
def url(self):
"""Gets the url of this UserNotificationSubject. # noqa: E501
:return: The url of this UserNotificationSubject. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this UserNotificationSubject.
:param url: The url of this UserNotificationSubject. # noqa: E501
:type: str
"""
self._url = url
@property
def latest_comment_url(self):
"""Gets the latest_comment_url of this UserNotificationSubject. # noqa: E501
:return: The latest_comment_url of this UserNotificationSubject. # noqa: E501
:rtype: str
"""
return self._latest_comment_url
@latest_comment_url.setter
def latest_comment_url(self, latest_comment_url):
"""Sets the latest_comment_url of this UserNotificationSubject.
:param latest_comment_url: The latest_comment_url of this UserNotificationSubject. # noqa: E501
:type: str
"""
self._latest_comment_url = latest_comment_url
@property
def type(self):
"""Gets the type of this UserNotificationSubject. # noqa: E501
:return: The type of this UserNotificationSubject. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this UserNotificationSubject.
:param type: The type of this UserNotificationSubject. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UserNotificationSubject, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserNotificationSubject):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.60221 | 104 | 0.572858 | [
"MIT"
] | pygitee/pygitee | gitee/models/user_notification_subject.py | 4,996 | Python |
import datetime as dt
import matplotlib.pyplot as plt
import lifetimes
import numpy as np
import os
import pandas as pd
import seaborn as sns
def numcard(x):
return x.nunique(), len(x)
def todateclean(x):
return pd.to_datetime(x, errors='coerce').dt.date.astype('datetime64')
"""
- info, shape, dtypes
- df.isnull().sum() #Check for null counts/ value_counts()
- Check for supposed imputed values (are there suspicious values of 0, like for Age. )
- change zeros to nans where appropriate
- Imputation of missing values
- handle stringified json
- df.dtypes # in case obj to (df.colname = df.colname.astype("category"))
- df['colname'] = pd.to_datetime(df['colname']).dt.date
- df.drop("colname", axis=1) # drop columns
- How balanced are the outcomes?
X = df.drop("diagnosis", axis=1) # just saying which axis again
Y = df["diagnosis"] # this is just a series now
col = X.columns # if we do type(col), it's an Index
X.isnull().sum() # this covers every column in the df.
def rangenorm(x):
return (x - x.mean())/(x.max() - x.min())
le = LabelEncoder()
le.fit(Y_norm)
"""
df = pd.read_csv("./ignoreland/onlineretail.csv")
df.info()
df.apply(lambda x: numcard(x))
datecols = ['InvoiceDate']
df.loc[:, datecols] = df.loc[:,datecols].apply(lambda x: todateclean(x))
dfnew = df[(df.Quantity>0) & (df.CustomerID.isnull()==False)]
dfnew['amt'] = dfnew['Quantity'] * dfnew['UnitPrice']
dfnew.describe()
from lifetimes.plotting import *
from lifetimes.utils import *
observation_period_end = '2011-12-09'
monetary_value_col = 'amt'
modeldata = summary_data_from_transaction_data(dfnew,
'CustomerID',
'InvoiceDate',
monetary_value_col=monetary_value_col,
observation_period_end=observation_period_end)
modeldata.head()
modeldata.info() # 4 floats.
# Eyeball distribution of frequency (calculated)
modeldata['frequency'].plot(kind='hist', bins=50)
print(modeldata['frequency'].describe())
print(modeldata['recency'].describe())
print(sum(modeldata['frequency'] == 0)/float(len(modeldata)))
##### Lec21
from lifetimes import BetaGeoFitter
# similar to lifelines
bgf = BetaGeoFitter(penalizer_coef=0.0) # no regularization param.
bgf.fit(modeldata['frequency'], modeldata['recency'], modeldata['T'])
print(bgf)
# See https://www.youtube.com/watch?v=guj2gVEEx4s and
# https://www.youtube.com/watch?v=gx6oHqpRgpY
## residual lifetime value is more useful construct
from lifetimes.plotting import plot_frequency_recency_matrix
plot_frequency_recency_matrix(bgf)
from lifetimes.plotting import plot_probability_alive_matrix
plot_probability_alive_matrix(bgf)
# lec 24:
# set an outer time boundary and predict cumulative purchases by that time
t = 10 # from now until now+t periods
modeldata['predicted_purchases'] = \
bgf.conditional_expected_number_of_purchases_up_to_time(t,
modeldata['frequency'],
modeldata['recency'],
modeldata['T'])
modeldata.sort_values(by='predicted_purchases').tail(5)
modeldata.sort_values(by='predicted_purchases').head(5)
# lec 25: validation of model
from lifetimes.plotting import plot_period_transactions
plot_period_transactions(bgf) # this plot shows very clearly the model performance
# in terms of transaction volume fit
# Lec 26: splitting into train and test (by time period)
summary_cal_holdout = calibration_and_holdout_data(df,
'CustomerID',
'InvoiceDate',
calibration_period_end='2011-06-08',
observation_period_end='2011-12-09')
summary_cal_holdout.head()
bgf.fit(summary_cal_holdout['frequency_cal'],
summary_cal_holdout['recency_cal'],
summary_cal_holdout['T_cal'])
from lifetimes.plotting import plot_calibration_purchases_vs_holdout_purchases
plot_calibration_purchases_vs_holdout_purchases(bgf, summary_cal_holdout)
from lifetimes.plotting import plot_history_alive
days_since_birth = 365
fig = plt.figure(figsize=(12,8))
id = 14621 # choose a customer id
sp_trans = df.loc[df['CustomerID'] == id] # specific customer's covariates
plot_history_alive(bgf, days_since_birth, sp_trans, 'InvoiceDate')
# Lec28: Subsetting to customers who repurchase.
returning_customers_summary = modeldata[modeldata['frequency']>0]
returning_customers_summary.head()
returning_customers_summary.shape
# Lec 29: gamma-gamma model for LTV
# Note: good practice to confirm small/no apparent corr for frequency and mean trxn value
# Rev per trxn: predict total monetary value.
# The Beta param for the gamma model of total spend is itself assumed gamma distributed
# that is where the name comes from.
# The expectation of total spend for person i is calculated in empirical-Bayes fashion, as a weighted
# mean of population average and the sample mean for person i.
# eq 5 in http://www.brucehardie.com/notes/025/gamma_gamma.pdf shows the arithmetic
# https://antonsruberts.github.io/lifetimes-CLV/ also great additional code.
# derivation here: http://www.brucehardie.com/notes/025/gamma_gamma.pdf
# Output of ggf fitter:
# p = the 'alpha' param in the gamma dist: E(Z|p, v) = p/v. Alpha adds upon convolution.
# q = the alpha param in the gamma dist of v -- v is gamma(q, gam) in the pop
# v = the 'beta' param in gamma dist. constant upon convolution.
# -- Note that v varies among customers (ie, is gamma distributed)
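# Illustrative sketch (not from the original script): eq (5) of the Hardie note cited above
# gives the conditional expected transaction value as a weighted average of the population
# mean spend p*v/(q-1) and the customer's observed mean, with the weight on the customer's
# own mean growing with their repeat-transaction count x. Parameter names mirror ggf.summary.
def expected_avg_transaction_value(x, mean_value, p, q, v):
    weight = (p * x) / (p * x + q - 1)      # weight on the customer's observed mean
    population_mean = (p * v) / (q - 1)     # prior (population) mean spend per transaction
    return weight * mean_value + (1 - weight) * population_mean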
from lifetimes import GammaGammaFitter
ggf = GammaGammaFitter(penalizer_coef=0.0)
ggf.fit(returning_customers_summary['frequency'],
returning_customers_summary['monetary_value'])
ggf.summary
ggf.conditional_expected_average_profit(modeldata['frequency'],
modeldata['monetary_value'])
# cond_exp_avg_profit => gives prediction of mean trxn value.
a0 = returning_customers_summary['monetary_value'].shape[0] # 2790 customers
# Total spend:
a1 = returning_customers_summary['monetary_value'].sum()
# Total time units (here, days) with purchase:
a2 = returning_customers_summary['frequency'].sum()
# Mean monetary value (over all purchase days), roughly equal to estimated v
returning_customers_summary['monetary_value'].mean()
ggf.summary
p_here = ggf.summary.iloc[0,0]
q_here = ggf.summary.iloc[1,0]
v_here = ggf.summary.iloc[2,0] # model says 486; empirical average is 477.
money_per_customer = a1/a0
###############
# review, per documentation:
bgf.summary
# r, alpha = shape, scale for gamma dist that represents sum (convolution) of purchase rates
# a = alpha param for beta dist of churn
# b = beta param for beta dist of churn
x = np.random.gamma(.784, 49.28,10000) # r, alpha, n
bgf.summary.loc["a",:][0]/ (bgf.summary.loc["b",:][0] + bgf.summary.loc["a",:][0])
###################################
# lec31: other models
dfnew.dtypes
dfnew_train = dfnew[dfnew.InvoiceDate < '2011-11-09']
dfnew_test = dfnew[dfnew.InvoiceDate >= '2011-11-09']
dfnew_test.shape
dfnew_train.shape
maxdate = dfnew_train.InvoiceDate.max()
mindate = dfnew_train.InvoiceDate.min()
dfnew_train['duration'] = (maxdate - dfnew_train.InvoiceDate)/np.timedelta64(1,'D')
dfsum1 = dfnew_train.groupby(['CustomerID'])['duration'].min().reset_index()
dfsum1.rename(columns = {'duration':'lasttime'}, inplace=True) # time from lasttime to now
dfsum2 = dfnew_train.groupby(['CustomerID'])['duration'].max().reset_index()
dfsum2.rename(columns = {'duration':'firsttime'}, inplace=True) # time from firsttime to now
dfnew_train['freq'] = 1
dfsum3 = dfnew_train.groupby(['CustomerID'])['freq'].sum().reset_index() # count of transactions by customer
dfnew_train['freq3m'] = 1
dfsum4 = dfnew_train[dfnew_train['duration'] < 91].groupby(['CustomerID'])['freq3m'].sum().reset_index()
# now let's merge the four customer-level datasets together.
# pd.concat uses indexes as the join keys, so use pd.merge on CustomerID via reduce instead.
from functools import reduce
dfs = [dfsum1, dfsum2, dfsum3, dfsum4]
dfsum = reduce(lambda left, right: pd.merge(left, right, on=['CustomerID'], how='outer'), dfs)
dfsum.shape
[_ for _ in map(lambda x: x.shape, dfs)]
dfsum.head()
###################
other_data = pd.read_csv("./ignoreland/oth.csv")
other_data.head()
dfsum = pd.merge(dfsum, other_data, on=['CustomerID'], how='left')
dfnew_test['target'] = 1
dfsum_target = dfnew_test.groupby(['CustomerID'])['target'].sum().reset_index()
dfsum = pd.merge(dfsum, dfsum_target, on=['CustomerID'], how='left')
dfsum = dfsum.fillna(0).sort_values(['target'], ascending=False)
list(dfsum.columns)
# Lec 35 Xgboost
"""
Gradient boosting in a nutshell: repeatedly fit a tree to the current residuals, then add its (shrunken) predictions to the ensemble.
lightgbm is a faster implementation
"""
# lec36:
# Use xgboost to model the count of transactions per customer
import xgboost
from sklearn.model_selection import train_test_split
xgb_model = xgboost.XGBRegressor(n_estimators=2000, objective='reg:squarederror', max_depth=5)
predictors = ['lasttime', 'firsttime', 'freq', 'freq3m', 'score', 'discount']
X = dfsum[predictors]
y = dfsum['target']
# Split x, x, y, y | train, test; give test frac and random state
x_train, x_valid, y_train, y_valid = train_test_split(X, y, test_size=0.32, random_state=867)
xgb_model.fit(x_train, y_train)
pred = xgb_model.predict(x_valid) # vector of predicted
err = (pred - y_valid)**2 # squared errors
mse = err.sum()/len(err)
rmse = np.sqrt(mse)
from xgboost import plot_importance
x = list(zip(predictors, xgb_model.feature_importances_))
x.sort(key=lambda x: -x[1])
x
plot_importance(xgb_model)
# https://towardsdatascience.com/interpretable-machine-learning-with-xgboost-9ec80d148d27
### Some global measures of xgboost feature importance:
# weight: number of times feature is used to split data (over all trees)
# cover: weight, weighted by data points being touched by those splits
# gain: mean training loss reduction (reduction in test-train) when the feature is used.
# argsort here returns the indices of the (reverse-sorted) feature importance values.
# Useful for grabbing index values and then working with arbitrarily zipped other lists (as I did above)
sorted_idx = np.argsort(xgb_model.feature_importances_)[::-1]
for _ in sorted_idx:
print([x_train.columns[_], xgb_model.feature_importances_[_]])
[_ for _ in map(lambda x: xgb_model.get_booster().get_score(importance_type=x),
['gain','weight','cover','total_gain','total_cover'])]
def importances(model, lst):
output = {}
for x in lst:
output[x] = model.get_booster().get_score(importance_type=x).values()
return pd.concat([pd.Series(model.get_booster().feature_names), pd.DataFrame(output, columns=lst)],
axis=1)
aa = importances(xgb_model,['gain','weight','cover','total_gain','total_cover'])
aa
pd.concat([pd.Series(xgb_model.get_booster().feature_names), aa], axis=1)
##################
# using lightgbm:
import lightgbm as lgb
lgbparams = {
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'mse',
'max_depth': 6,
'learning_rate': 0.02,
}
X1, X2, y1, y2 = train_test_split(X, y, test_size=0.32, random_state=867)
x_train, x_valid, y_train, y_valid = train_test_split(X1, y1, test_size=0.1, random_state=867)
x_train = x_train[predictors]
x_valid = x_valid[predictors]
d_train = lgb.Dataset(x_train, label=y_train)
d_valid = lgb.Dataset(x_valid, label=y_valid)
watchlist = [d_valid]
n_estimators = 2000
lightmodel = lgb.train(lgbparams, d_train, n_estimators, watchlist, verbose_eval=1)
importancelist = ['gain','split']
lightmodel.feature_importance(importance_type=importancelist[0])
importancedf = pd.DataFrame({'feature': predictors})
importancedf = reduce(lambda left, right: pd.concat([left, right], axis=1),
                      [importancedf] + [pd.Series(lightmodel.feature_importance(_), name=_)
                                        for _ in importancelist])
importancedf.corr()
"""
frequency = number of periods in which a non-first purchase was made
T = age in same units of each customer
recency = period[last purchase] - period[first purchase]
monetary_value = sum(money)/(frequency+1)
# use utility functions to aggregate into useable format.
# https://lifetimes.readthedocs.io/en/latest/More%20examples%20and%20recipes.html
# sql examples for aggregating into RFM and doing holdout split.
"""
"""
Also, per brucehardie:
the integrated (mixture-of-distributions) nature of these models is amenable to
the Gaussian hypergeometric function trick for evaluating the double integral.
"""
| 38.88685 | 109 | 0.71005 | [
"MIT"
] | cbymar/clv-modeling | clvscript00.py | 12,716 | Python |
# Copyright 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fcntl
import hashlib
import os
import shutil
import subprocess
import sys
# Allow use of this module even if termcolor is missing. There are many
# standalone python scripts in build_tools that can be run directly without
# PYTHONPATH set (i.e. not via build/python_wrapper that adds this path).
# TODO(sbc): we should probably just assume that all the module dependencies
# are present.
try:
import termcolor
except ImportError:
termcolor = None
from webports import error, paths
GS_URL = 'http://storage.googleapis.com/'
GS_BUCKET = 'webports'
GS_MIRROR_URL = '%s%s/mirror' % (GS_URL, GS_BUCKET)
# Require the latest version of the NaCl SDK. webports is built
# and tested against the pepper_canary release. To build against older
# versions of the SDK use one of the pepper_XX branches (or use
# --skip-sdk-version-check).
MIN_SDK_VERSION = 49
arch_to_pkgarch = {
'x86_64': 'x86-64',
'i686': 'i686',
'arm': 'arm',
'pnacl': 'pnacl',
'emscripten': 'emscripten',
'le32': 'le32'
}
# Inverse of arch_to_pkgarch
pkgarch_to_arch = {v: k for k, v in arch_to_pkgarch.items()}
LOG_ERROR = 0
LOG_WARN = 1
LOG_INFO = 2
LOG_VERBOSE = 3
LOG_TRACE = 4
ELF_MAGIC = '\x7fELF'
PEXE_MAGIC = 'PEXE'
log_level = LOG_INFO
color_mode = 'auto'
def colorize(message, color):
if termcolor and colorize.enabled:
return termcolor.colored(message, color)
else:
return message
def check_stdout_for_color_support():
if color_mode == 'auto':
colorize.enabled = sys.stdout.isatty()
def is_elf_file(filename):
if os.path.islink(filename):
return False
with open(filename) as f:
header = f.read(4)
return header == ELF_MAGIC
def is_pexe_file(filename):
if os.path.islink(filename):
return False
with open(filename) as f:
header = f.read(4)
return header == PEXE_MAGIC
def memoize(f):
"""Memoization decorator for functions taking one or more arguments."""
class Memo(dict):
def __init__(self, f):
super(Memo, self).__init__()
self.f = f
def __call__(self, *args):
return self[args]
def __missing__(self, key):
ret = self[key] = self.f(*key)
return ret
return Memo(f)
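# Illustrative usage (not part of the original module):
#
#   @memoize
#   def expensive_lookup(key):
#     ...  # compute something costly
#     return result
#
# Repeated calls with the same arguments return the cached value instead of recomputing.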
def set_verbose(enabled):
if enabled:
set_log_level(LOG_VERBOSE)
else:
set_log_level(LOG_INFO)
def set_log_level(verbosity):
global log_level
log_level = verbosity
def log(message, verbosity=LOG_INFO):
"""Log a message to the console (stdout)."""
if log_level < verbosity:
return
sys.stdout.write(str(message) + '\n')
sys.stdout.flush()
def log_heading(message, suffix=''):
"""Log a colored/highlighted message with optional suffix."""
if colorize.enabled:
log(colorize(message, 'green') + suffix)
else:
if log_level > LOG_WARN:
# When running in verbose mode make sure heading standout
log('###################################################################')
log(message + suffix)
log('###################################################################')
else:
log(message + suffix)
def warn(message):
log('warning: ' + message, LOG_WARN)
def trace(message):
log(message, LOG_TRACE)
def log_verbose(message):
log(message, LOG_VERBOSE)
def find_in_path(command_name):
"""Search user's PATH for a given executable.
Returns:
Full path to executable.
"""
extensions = ('',)
if not os.path.splitext(command_name)[1] and os.name == 'nt':
extensions = ('.bat', '.com', '.exe')
for path in os.environ.get('PATH', '').split(os.pathsep):
for ext in extensions:
full_name = os.path.join(path, command_name + ext)
if os.path.exists(full_name) and os.path.isfile(full_name):
return full_name
raise error.Error('command not found: %s' % command_name)
def download_file(filename, url):
"""Download a file from a given URL.
Args:
filename: the name of the file to download the URL to.
    url: the URL to fetch.
"""
temp_filename = filename + '.partial'
# Ensure curl is in user's PATH
find_in_path('curl')
curl_cmd = ['curl', '--fail', '--location', '--stderr', '-', '-o',
temp_filename]
if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()):
# Add --progress-bar but only if stdout is a TTY device.
curl_cmd.append('--progress-bar')
else:
    # otherwise suppress status output, since curl always assumes it's
# talking to a TTY and writes \r and \b characters. But add
# --show-error so that when curl fails it at least prints something.
curl_cmd += ['--silent', '--show-error']
curl_cmd.append(url)
if log_level > LOG_WARN:
log('Downloading: %s [%s]' % (url, filename))
else:
log('Downloading: %s' % url.replace(GS_URL, ''))
try:
subprocess.check_call(curl_cmd)
except subprocess.CalledProcessError as e:
raise error.Error('Error downloading file: %s' % str(e))
os.rename(temp_filename, filename)
def check_stamp(filename, contents=None):
"""Check that a given stamp file is up-to-date.
  Returns: False if the file does not exist, is older than the given
  comparison file, or does not contain the given contents. True otherwise.
"""
if not os.path.exists(filename):
return False
if contents is not None:
with open(filename) as f:
if not f.read().startswith(contents):
return False
return True
@memoize
def get_sdk_root():
"""Returns the root of the currently configured Native Client SDK."""
root = os.environ.get('NACL_SDK_ROOT')
if root is None:
local_sdk_root = os.path.join(paths.OUT_DIR, 'nacl_sdk')
if os.path.exists(local_sdk_root):
root = local_sdk_root
else:
raise error.Error('$NACL_SDK_ROOT not set')
if sys.platform == "cygwin":
root = root.replace('\\', '/')
return root
@memoize
def get_emscripten_root():
emscripten = os.environ.get('EMSCRIPTEN')
if emscripten is None:
local_root = os.path.join(paths.OUT_DIR, 'emsdk', 'emscripten')
if os.path.exists(local_root):
emscripten = local_root
else:
raise error.Error('$EMSCRIPTEN not set and %s does not exist.' %
local_root)
if not os.path.isdir(emscripten):
raise error.Error('$EMSCRIPTEN environment variable does not point'
' to a directory: %s' % emscripten)
return emscripten
def setup_emscripten():
if 'EMSCRIPTEN' in os.environ:
return
local_root = get_emscripten_root()
os.environ['EMSCRIPTEN'] = local_root
os.environ['EM_CONFIG'] = os.path.join(
os.path.dirname(local_root), '.emscripten')
try:
find_in_path('node')
except error.Error:
node_bin = os.path.join(paths.OUT_DIR, 'node', 'bin')
if not os.path.isdir(node_bin):
raise error.Error(
'node not found in path and default path not found: %s' % node_bin)
os.environ['PATH'] += ':' + node_bin
find_in_path('node')
@memoize
def get_sdk_version():
"""Returns the version (as a string) of the current SDK."""
getos = os.path.join(get_sdk_root(), 'tools', 'getos.py')
version = subprocess.check_output([getos, '--sdk-version']).strip()
return version
def check_sdk_version(version):
"""Returns True if the currently configured SDK is 'version' or above."""
return int(get_sdk_version()) >= int(version)
@memoize
def get_sdk_revision():
"""Returns the revision of the currently configured Native Client SDK."""
getos = os.path.join(get_sdk_root(), 'tools', 'getos.py')
version = subprocess.check_output([getos, '--sdk-revision']).strip()
return int(version)
@memoize
def get_platform():
"""Returns the current platform name according getos.py."""
getos = os.path.join(get_sdk_root(), 'tools', 'getos.py')
platform = subprocess.check_output([getos]).strip()
return platform
@memoize
def get_toolchain_root(config):
"""Returns the toolchain folder for a given NaCl toolchain."""
if config.toolchain == 'emscripten':
return get_emscripten_root()
platform = get_platform()
if config.toolchain in ('pnacl', 'clang-newlib'):
tc_dir = os.path.join('%s_pnacl' % platform)
else:
tc_arch = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}[config.arch]
tc_dir = '%s_%s_%s' % (platform, tc_arch, config.libc)
return os.path.join(get_sdk_root(), 'toolchain', tc_dir)
@memoize
def get_install_root(config):
"""Returns the install location given a build configuration."""
tc_dir = get_toolchain_root(config)
if config.toolchain == 'emscripten':
return os.path.join(tc_dir, 'system', 'local')
if config.toolchain == 'pnacl':
tc_dir = os.path.join(tc_dir, 'le32-nacl')
else:
tc_dir = os.path.join(tc_dir, '%s-nacl' % config.arch)
return os.path.join(tc_dir, 'usr')
@memoize
def get_install_stamp_root(config):
"""Returns the installation metadata folder for the give configuration."""
tc_root = get_install_root(config)
return os.path.join(tc_root, 'var', 'lib', 'npkg')
@memoize
def get_strip(config):
tc_dir = get_toolchain_root(config)
if config.toolchain == 'pnacl':
strip = os.path.join(tc_dir, 'bin', 'pnacl-strip')
else:
strip = os.path.join(tc_dir, 'bin', '%s-nacl-strip' % config.arch)
assert os.path.exists(strip), 'strip executable not found: %s' % strip
return strip
def get_install_stamp(package_name, config):
"""Returns the filename of the install stamp for for a given package.
This file is written at install time and contains metadata
about the installed package.
"""
root = get_install_stamp_root(config)
return os.path.join(root, package_name + '.info')
def get_list_file(package_name, config):
"""Returns the filename of the list of installed files for a given package.
This file is written at install time.
"""
root = get_install_stamp_root(config)
return os.path.join(root, package_name + '.list')
def is_installed(package_name, config, stamp_content=None):
"""Returns True if the given package is installed."""
stamp = get_install_stamp(package_name, config)
result = check_stamp(stamp, stamp_content)
return result
def check_sdk_root():
"""Check validity of NACL_SDK_ROOT."""
root = get_sdk_root()
if not os.path.isdir(root):
raise error.Error('$NACL_SDK_ROOT does not exist: %s' % root)
landmark = os.path.join(root, 'tools', 'getos.py')
if not os.path.exists(landmark):
raise error.Error("$NACL_SDK_ROOT (%s) doesn't look right. "
"Couldn't find landmark file (%s)" % (root, landmark))
if not check_sdk_version(MIN_SDK_VERSION):
raise error.Error(
'This version of webports requires at least version %s of\n'
'the NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\n'
'to use webports with an older version of the SDK please checkout\n'
'one of the pepper_XX branches (or run with\n'
'--skip-sdk-version-check).' % (MIN_SDK_VERSION, get_sdk_version()))
def hash_file(filename):
"""Return the SHA1 (in hex format) of the contents of the given file."""
block_size = 100 * 1024
sha1 = hashlib.sha1()
with open(filename) as f:
while True:
data = f.read(block_size)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
class HashVerificationError(error.Error):
pass
def verify_hash(filename, sha1):
"""Return True if the sha1 of the given file match the sha1 passed in."""
file_sha1 = hash_file(filename)
if sha1 != file_sha1:
raise HashVerificationError(
'verification failed: %s\nExpected: %s\nActual: %s' %
(filename, sha1, file_sha1))
def remove_tree(directory):
"""Recursively remove a directory and its contents."""
if not os.path.exists(directory):
return
if not os.path.isdir(directory):
raise error.Error('RemoveTree: not a directory: %s', directory)
shutil.rmtree(directory)
def rel_path(filename):
"""Return a pathname relative to the root the webports src tree.
This is used mostly to make output more readable when printing filenames."""
return os.path.relpath(filename, paths.NACLPORTS_ROOT)
def makedirs(directory):
if os.path.isdir(directory):
return
if os.path.exists(directory):
raise error.Error('mkdir: File exists and is not a directory: %s' %
directory)
trace("mkdir: %s" % directory)
os.makedirs(directory)
class DirLock(object):
"""Per-directory flock()-based context manager
This class will raise an exception if another process already holds the
lock for the given directory.
"""
def __init__(self, lock_dir):
if not os.path.exists(lock_dir):
makedirs(lock_dir)
self.file_name = os.path.join(lock_dir, 'webports.lock')
self.fd = open(self.file_name, 'w')
def __enter__(self):
try:
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except Exception:
raise error.Error("Unable to acquire lock (%s): Is webports already "
"running?" % self.file_name)
def __exit__(self, exc_type, exc_val, exc_tb):
os.remove(self.file_name)
self.fd.close()
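# Illustrative usage (not part of the original module) of DirLock as a context manager:
#
#   with DirLock('/path/to/some/dir'):
#     ...  # operations that must not run concurrently for that directory
#
# BuildLock and InstallLock below simply bind the lock directory to OUT_DIR and to the
# toolchain install root respectively.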
class BuildLock(DirLock):
"""Lock used when building a package (essentially a lock on OUT_DIR)"""
def __init__(self):
super(BuildLock, self).__init__(paths.OUT_DIR)
class InstallLock(DirLock):
"""Lock used when installing/uninstalling package"""
def __init__(self, config):
root = get_install_root(config)
super(InstallLock, self).__init__(root)
check_stdout_for_color_support()
| 27.70122 | 80 | 0.678553 | [
"BSD-3-Clause"
] | DiamondLovesYou/webports | lib/webports/util.py | 13,629 | Python |
from . import sean_common as common
import torch.nn as nn
import torch
from basicsr.utils.registry import ARCH_REGISTRY
class LFF(nn.Module):
def __init__(self, scale, n_colors, conv=common.default_conv, n_feats=64):
super(LFF, self).__init__()
kernel_size = 3
n_layes = 5
act = nn.ReLU(True)
m_head = [conv(3, n_feats, kernel_size)]
m_body = [
conv(
n_feats, n_feats, kernel_size
) for _ in range(n_layes)
]
m_tail = [
common.Upsampler(conv, scale, n_feats, act=False),
nn.Conv2d(
n_feats, n_colors, kernel_size,
padding=(kernel_size // 2)
)
]
self.LLF_head = nn.Sequential(*m_head)
self.LLF_body = nn.Sequential(*m_body)
self.LLF_tail = nn.Sequential(*m_tail)
def forward(self, x):
x = self.LLF_head(x)
x = self.LLF_body(x)
x = self.LLF_tail(x)
return x
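# MSRB: a multi-scale residual block. The input goes through parallel 3x3 and 5x5
# convolutions, the branches are concatenated and passed through a second 3x3/5x5 pair,
# then fused back to n_feats channels with a 1x1 convolution and added to the input
# (residual connection).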
class MSRB(nn.Module):
def __init__(self, conv=common.default_conv):
super(MSRB, self).__init__()
n_feats = 64
kernel_size_1 = 3
kernel_size_2 = 5
self.conv_3_1 = conv(n_feats, n_feats, kernel_size_1)
self.conv_3_2 = conv(n_feats * 2, n_feats * 2, kernel_size_1)
self.conv_5_1 = conv(n_feats, n_feats, kernel_size_2)
self.conv_5_2 = conv(n_feats * 2, n_feats * 2, kernel_size_2)
self.confusion = nn.Conv2d(n_feats * 4, n_feats, 1, padding=0, stride=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
input_1 = x
output_3_1 = self.relu(self.conv_3_1(input_1))
output_5_1 = self.relu(self.conv_5_1(input_1))
input_2 = torch.cat([output_3_1, output_5_1], 1)
output_3_2 = self.relu(self.conv_3_2(input_2))
output_5_2 = self.relu(self.conv_5_2(input_2))
input_3 = torch.cat([output_3_2, output_5_2], 1)
output = self.confusion(input_3)
output += x
return output
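# Edge_Net: a head convolution followed by a chain of MSRB blocks; the outputs of every
# block (plus the head features) are concatenated, fused with a 1x1 convolution, upsampled
# by `scale`, and projected back to the image channels. SEAN below uses it as the
# high-frequency (edge) branch.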
class Edge_Net(nn.Module):
def __init__(self, scale, n_colors, conv=common.default_conv, n_feats=64):
super(Edge_Net, self).__init__()
kernel_size = 3
act = nn.ReLU(True)
n_blocks = 5
self.n_blocks = n_blocks
modules_head = [conv(3, n_feats, kernel_size)]
modules_body = nn.ModuleList()
for i in range(n_blocks):
modules_body.append(
MSRB())
modules_tail = [
nn.Conv2d(n_feats * (self.n_blocks + 1), n_feats, 1, padding=0, stride=1),
conv(n_feats, n_feats, kernel_size),
common.Upsampler(conv, scale, n_feats, act=False),
conv(n_feats, n_colors, kernel_size)]
self.Edge_Net_head = nn.Sequential(*modules_head)
self.Edge_Net_body = nn.Sequential(*modules_body)
self.Edge_Net_tail = nn.Sequential(*modules_tail)
def forward(self, x):
x = self.Edge_Net_head(x)
res = x
MSRB_out = []
for i in range(self.n_blocks):
x = self.Edge_Net_body[i](x)
MSRB_out.append(x)
MSRB_out.append(res)
res = torch.cat(MSRB_out, 1)
x = self.Edge_Net_tail(res)
return x
class Net(nn.Module):
def __init__(self, scale, res_scale, conv=common.default_conv, n_feats=64):
super(Net, self).__init__()
n_resblock = 40
kernel_size = 3
act = nn.ReLU(True)
m_head = [conv(n_feats, n_feats, kernel_size)]
m_body = [
common.ResBlock(
conv, n_feats, kernel_size, act=act, res_scale=res_scale
) for _ in range(n_resblock)
]
m_tail = [conv(n_feats, 3, kernel_size)]
self.Net_head = nn.Sequential(*m_head)
self.Net_body = nn.Sequential(*m_body)
self.Net_tail = nn.Sequential(*m_tail)
def forward(self, x):
x = self.Net_head(x)
res = self.Net_body(x)
res += x
x = self.Net_tail(res)
return x
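# SEAN: the full network. The mean-shifted input is fed to two parallel branches, LFF for
# the low-frequency reconstruction and Edge_Net for the high-frequency/edge reconstruction.
# Their outputs are concatenated, fused with a 1x1 convolution, refined by the deep residual
# Net, and the mean shift is added back. forward() returns both the edge estimate and the
# final super-resolved image.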
@ARCH_REGISTRY.register()
class SEAN(nn.Module):
def __init__(self,
n_feats,
scale,
rgb_range,
res_scale,
n_colors,
conv=common.default_conv):
super(SEAN, self).__init__()
kernel_size = 3
act = nn.ReLU(True)
rgb_mean = (0.4488, 0.4371, 0.4040)
rgb_std = (1.0, 1.0, 1.0)
self.sub_mean = common.MeanShift(rgb_range, rgb_mean, rgb_std)
# define head module
m_LFF = [LFF(scale, n_colors, n_feats=n_feats)]
# define body module
m_Edge = [Edge_Net(scale, n_colors, n_feats=n_feats)]
m_Fushion = [conv(6, n_feats, kernel_size=1)]
# define tail module
m_Net = [Net(scale, res_scale, n_feats=n_feats)]
self.add_mean = common.MeanShift(rgb_range, rgb_mean, rgb_std, 1)
self.lff = nn.Sequential(*m_LFF)
self.edge = nn.Sequential(*m_Edge)
self.fushion = nn.Sequential(*m_Fushion)
self.net = nn.Sequential(*m_Net)
def forward(self, x):
x = self.sub_mean(x)
low = self.lff(x)
high = self.edge(x)
out = torch.cat([low, high], 1)
out = self.fushion(out)
out = self.net(out)
x = self.add_mean(out)
return high, x
# import torch.nn as nn
# import torch
# from basicsr.utils.registry import ARCH_REGISTRY
#
#
# import math
#
# import torch
# import torch.nn as nn
#
#
# def default_conv(in_channels, out_channels, kernel_size, bias=True):
# return nn.Conv2d(
# in_channels, out_channels, kernel_size,
# padding=(kernel_size//2), bias=bias)
#
# class MeanShift(nn.Conv2d):
# def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
# super(MeanShift, self).__init__(3, 3, kernel_size=1)
# std = torch.Tensor(rgb_std)
# self.weight.data = torch.eye(3).view(3, 3, 1, 1)
# self.weight.data.div_(std.view(3, 1, 1, 1))
# self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
# self.bias.data.div_(std)
# self.requires_grad = False
#
# class BasicBlock(nn.Sequential):
# def __init__(
# self, in_channels, out_channels, kernel_size, stride=1, bias=False,
# bn=True, act=nn.ReLU(True)):
#
# m = [nn.Conv2d(
# in_channels, out_channels, kernel_size,
# padding=(kernel_size//2), stride=stride, bias=bias)
# ]
# if bn: m.append(nn.BatchNorm2d(out_channels))
# if act is not None: m.append(act)
# super(BasicBlock, self).__init__(*m)
#
# class ResBlock(nn.Module):
# def __init__(
# self, conv, n_feat, kernel_size,
# bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
#
# super(ResBlock, self).__init__()
# m = []
# for i in range(2):
# m.append(conv(n_feat, n_feat, kernel_size, bias=bias))
# if bn: m.append(nn.BatchNorm2d(n_feat))
# if i == 0: m.append(act)
#
# self.body = nn.Sequential(*m)
# self.res_scale = res_scale
#
# def forward(self, x):
# res = self.body(x).mul(self.res_scale)
# res += x
#
# return res
#
# class Upsampler(nn.Sequential):
# def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
#
# m = []
# if (scale & (scale - 1)) == 0: # Is scale = 2^n?
# for _ in range(int(math.log(scale, 2))):
# m.append(conv(n_feat, 4 * n_feat, 3, bias))
# m.append(nn.PixelShuffle(2))
# if bn: m.append(nn.BatchNorm2d(n_feat))
# if act: m.append(act())
# elif scale == 3:
# m.append(conv(n_feat, 9 * n_feat, 3, bias))
# m.append(nn.PixelShuffle(3))
# if bn: m.append(nn.BatchNorm2d(n_feat))
# if act: m.append(act())
# else:
# raise NotImplementedError
#
# super(Upsampler, self).__init__(*m)
#
# ## add SELayer
# class SELayer(nn.Module):
# def __init__(self, channel, reduction=16):
# super(SELayer, self).__init__()
# self.avg_pool = nn.AdaptiveAvgPool2d(1)
# self.conv_du = nn.Sequential(
# nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
# nn.ReLU(inplace=True),
# nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
# nn.Sigmoid()
# )
#
# def forward(self, x):
# y = self.avg_pool(x)
# y = self.conv_du(y)
# return x * y
#
# ## add SEResBlock
# class SEResBlock(nn.Module):
# def __init__(
# self, conv, n_feat, kernel_size, reduction,
# bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
#
# super(SEResBlock, self).__init__()
# modules_body = []
# for i in range(2):
# modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))
# if bn: modules_body.append(nn.BatchNorm2d(n_feat))
# if i == 0: modules_body.append(act)
# modules_body.append(SELayer(n_feat, reduction))
# self.body = nn.Sequential(*modules_body)
# self.res_scale = res_scale
#
# def forward(self, x):
# res = self.body(x)
# #res = self.body(x).mul(self.res_scale)
# res += x
#
# return res
#
#
# class LFF(nn.Module):
# def __init__(self, scale, n_colors, conv=default_conv, n_feats=64):
# super(LFF, self).__init__()
#
# kernel_size = 3
# n_layes = 5
# act = nn.ReLU(True)
#
# m_head = [conv(3, n_feats, kernel_size)]
#
# m_body = [
# conv(
# n_feats, n_feats, kernel_size
# ) for _ in range(n_layes)
# ]
#
# m_tail = [
# Upsampler(conv, scale, n_feats, act=False),
# nn.Conv2d(
# n_feats, n_colors, kernel_size,
# padding=(kernel_size // 2)
# )
# ]
#
# self.LLF_head = nn.Sequential(*m_head)
# self.LLF_body = nn.Sequential(*m_body)
# self.LLF_tail = nn.Sequential(*m_tail)
#
# def forward(self, x):
# x = self.LLF_head(x)
# x = self.LLF_body(x)
# x = self.LLF_tail(x)
# return x
#
#
# class MSRB(nn.Module):
# def __init__(self, conv=default_conv):
# super(MSRB, self).__init__()
#
# n_feats = 64
# kernel_size_1 = 3
# kernel_size_2 = 5
#
# self.conv_3_1 = conv(n_feats, n_feats, kernel_size_1)
# self.conv_3_2 = conv(n_feats * 2, n_feats * 2, kernel_size_1)
# self.conv_5_1 = conv(n_feats, n_feats, kernel_size_2)
# self.conv_5_2 = conv(n_feats * 2, n_feats * 2, kernel_size_2)
# self.confusion = nn.Conv2d(n_feats * 4, n_feats, 1, padding=0, stride=1)
# self.relu = nn.ReLU(inplace=True)
#
# def forward(self, x):
# input_1 = x
# output_3_1 = self.relu(self.conv_3_1(input_1))
# output_5_1 = self.relu(self.conv_5_1(input_1))
# input_2 = torch.cat([output_3_1, output_5_1], 1)
# output_3_2 = self.relu(self.conv_3_2(input_2))
# output_5_2 = self.relu(self.conv_5_2(input_2))
# input_3 = torch.cat([output_3_2, output_5_2], 1)
# output = self.confusion(input_3)
# output += x
# return output
#
#
# class Edge_Net(nn.Module):
# def __init__(self, scale, n_colors, conv=default_conv, n_feats=64):
# super(Edge_Net, self).__init__()
#
# kernel_size = 3
# act = nn.ReLU(True)
# n_blocks = 5
# self.n_blocks = n_blocks
#
# modules_head = [conv(3, n_feats, kernel_size)]
#
# modules_body = nn.ModuleList()
# for i in range(n_blocks):
# modules_body.append(
# MSRB())
#
# modules_tail = [
# nn.Conv2d(n_feats * (self.n_blocks + 1), n_feats, 1, padding=0, stride=1),
# conv(n_feats, n_feats, kernel_size),
# Upsampler(conv, scale, n_feats, act=False),
# conv(n_feats, n_colors, kernel_size)]
#
# self.Edge_Net_head = nn.Sequential(*modules_head)
# self.Edge_Net_body = nn.Sequential(*modules_body)
# self.Edge_Net_tail = nn.Sequential(*modules_tail)
#
# def forward(self, x):
# x = self.Edge_Net_head(x)
# res = x
#
# MSRB_out = []
# for i in range(self.n_blocks):
# x = self.Edge_Net_body[i](x)
# MSRB_out.append(x)
# MSRB_out.append(res)
#
# res = torch.cat(MSRB_out, 1)
# x = self.Edge_Net_tail(res)
# return x
#
#
# class Net(nn.Module):
# def __init__(self, res_scale, conv=default_conv, n_feats=64):
# super(Net, self).__init__()
#
# n_resblock = 40
# kernel_size = 3
# act = nn.ReLU(True)
#
# m_head = [conv(n_feats, n_feats, kernel_size)]
#
# m_body = [
# ResBlock(
# conv, n_feats, kernel_size, act=act, res_scale=res_scale
# ) for _ in range(n_resblock)
# ]
#
# m_tail = [conv(n_feats, 3, kernel_size)]
#
# self.Net_head = nn.Sequential(*m_head)
# self.Net_body = nn.Sequential(*m_body)
# self.Net_tail = nn.Sequential(*m_tail)
#
# def forward(self, x):
# x = self.Net_head(x)
# res = self.Net_body(x)
# res += x
# x = self.Net_tail(res)
# return x
#
# @ARCH_REGISTRY.register()
# class SEAN(nn.Module):
# def __init__(self,
# n_feats,
# scale,
# n_colors,
# rgb_range,
# res_scale,
# conv=default_conv):
# super(SEAN, self).__init__()
#
# rgb_mean = (0.4488, 0.4371, 0.4040)
# rgb_std = (1.0, 1.0, 1.0)
# self.sub_mean = MeanShift(rgb_range, rgb_mean, rgb_std)
#
# # define head module
# m_LFF = [LFF(scale, n_colors, n_feats=n_feats)]
#
# # define body module
# m_Edge = [Edge_Net(scale, n_colors, n_feats=n_feats)]
#
# m_Fushion = [conv(6, n_feats, kernel_size=1)]
#
# # define tail module
# m_Net = [Net(res_scale, n_feats=n_feats)]
#
# self.add_mean = MeanShift(rgb_range, rgb_mean, rgb_std, 1)
#
# self.lff = nn.Sequential(*m_LFF)
# self.edge = nn.Sequential(*m_Edge)
# self.fushion = nn.Sequential(*m_Fushion)
# self.net = nn.Sequential(*m_Net)
#
# def forward(self, x):
# x = self.sub_mean(x)
# low = self.lff(x)
# high = self.edge(x)
# out = torch.cat([low, high], 1)
# out = self.fushion(out)
# out = self.net(out)
# x = self.add_mean(out)
# return high, x
| 30.93763 | 88 | 0.555541 | [
"Apache-2.0",
"MIT"
] | neu-szy/BasicSR | basicsr/archs/seanet_arch.py | 14,881 | Python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
autodoc_default_flags = ['members', 'inherited-members']
# -- Project information -----------------------------------------------------
project = 'Relaxed Lasso'
copyright = '2020 Continental Corporation'
author = 'Grégory Vial, Flora Estermann'
# The full version, including alpha/beta/rc tags
from relaxed_lasso._version import __version__
release = __version__
# -- General configuration ---------------------------------------------------
# Explicitly assign the master document.
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 34.730159 | 80 | 0.661792 | [
"BSD-3-Clause"
] | GregVial/RelaxedLasso | docs/source/conf.py | 2,189 | Python |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.trainer.fn_args_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tfx.components.trainer import fn_args_utils
from tfx.proto import trainer_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
class FnArgsUtilsTest(tf.test.TestCase):
def testGetCommonFnArgs(self):
source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
# Create input dict.
examples = standard_artifacts.Examples()
examples.uri = os.path.join(source_data_dir,
'transform/transformed_examples')
examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
transform_output = standard_artifacts.TransformGraph()
transform_output.uri = os.path.join(source_data_dir,
'transform/transform_graph')
schema = standard_artifacts.Schema()
schema.uri = os.path.join(source_data_dir, 'schema_gen')
base_model = standard_artifacts.Model()
base_model.uri = os.path.join(source_data_dir, 'trainer/previous')
input_dict = {
standard_component_specs.EXAMPLES_KEY: [examples],
standard_component_specs.TRANSFORM_GRAPH_KEY: [transform_output],
standard_component_specs.SCHEMA_KEY: [schema],
standard_component_specs.BASE_MODEL_KEY: [base_model],
}
# Create exec properties skeleton.
exec_properties = {
'train_args':
proto_utils.proto_to_json(trainer_pb2.TrainArgs(num_steps=1000)),
'eval_args':
proto_utils.proto_to_json(trainer_pb2.EvalArgs(num_steps=500)),
}
fn_args = fn_args_utils.get_common_fn_args(input_dict, exec_properties,
'tempdir')
self.assertEqual(fn_args.working_dir, 'tempdir')
self.assertEqual(fn_args.train_steps, 1000)
self.assertEqual(fn_args.eval_steps, 500)
self.assertLen(fn_args.train_files, 1)
self.assertEqual(fn_args.train_files[0],
os.path.join(examples.uri, 'Split-train', '*'))
self.assertLen(fn_args.eval_files, 1)
self.assertEqual(fn_args.eval_files[0],
os.path.join(examples.uri, 'Split-eval', '*'))
self.assertEqual(fn_args.schema_path,
os.path.join(schema.uri, 'schema.pbtxt'))
# Depending on execution environment, the base model may have been stored
# at .../Format-Servo/... or .../Format-Serving/... directory patterns.
self.assertRegexpMatches(
fn_args.base_model,
os.path.join(base_model.uri,
r'Format-(Servo|Serving)/export/chicago-taxi/\d+'))
self.assertEqual(fn_args.transform_graph_path, transform_output.uri)
self.assertIsInstance(fn_args.data_accessor, fn_args_utils.DataAccessor)
if __name__ == '__main__':
tf.test.main()
| 39.085106 | 79 | 0.707403 | [
"Apache-2.0"
] | Kareem-negm/tfx | tfx/components/trainer/fn_args_utils_test.py | 3,674 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['UserSettings']
class UserSettings(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
properties: Optional[pulumi.Input[pulumi.InputType['UserPropertiesArgs']]] = None,
user_settings_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Response to get user settings
API Version: 2018-10-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties.
:param pulumi.Input[str] user_settings_name: The name of the user settings
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if properties is None and not opts.urn:
raise TypeError("Missing required property 'properties'")
__props__['properties'] = properties
__props__['user_settings_name'] = user_settings_name
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:portal:UserSettings"), pulumi.Alias(type_="azure-native:portal/latest:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/latest:UserSettings"), pulumi.Alias(type_="azure-native:portal/v20181001:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/v20181001:UserSettings")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(UserSettings, __self__).__init__(
'azure-native:portal:UserSettings',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'UserSettings':
"""
Get an existing UserSettings resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["properties"] = None
return UserSettings(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.UserPropertiesResponse']:
"""
The cloud shell user settings properties.
"""
return pulumi.get(self, "properties")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 43.670103 | 370 | 0.664542 | [
"Apache-2.0"
] | pulumi-bot/pulumi-azure-native | sdk/python/pulumi_azure_native/portal/user_settings.py | 4,236 | Python |
# Copyright 2020. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import six
import matplotlib.pyplot as plt
import types
import copy
from functools import partial
from .spike_trains import SpikeTrains
from .spike_trains_api import SpikeTrainsAPI
def __get_spike_trains(spike_trains):
"""Make sure SpikeTrainsAPI object is always returned"""
if isinstance(spike_trains, six.string_types):
# Load spikes from file
return SpikeTrains.load(spike_trains)
elif isinstance(spike_trains, (SpikeTrains, SpikeTrainsAPI)):
return spike_trains
raise AttributeError('Could not parse spiketrains. Pass in file-path, SpikeTrains object, or list of the previous')
def __get_population(spike_trains, population):
"""Helper function to figure out which population of nodes to use."""
pops = spike_trains.populations
if population is None:
# If only one population exists in spikes object/file select that one
if len(pops) > 1:
raise Exception('SpikeTrains contains more than one population of nodes. Use "population" parameter '
'to specify population to display.')
else:
return pops[0]
elif population not in pops:
raise Exception('Could not find node population "{}" in SpikeTrains, only found {}'.format(population, pops))
else:
return population
def __get_node_groups(spike_trains, node_groups, population):
"""Helper function for parsing the 'node_groups' params"""
if node_groups is None:
# If none are specified by user make a 'node_group' consisting of all nodes
selected_nodes = spike_trains.node_ids(population=population)
return [{'node_ids': selected_nodes, 'c': 'b'}], selected_nodes
else:
# Fetch all node_ids which can be used to filter the data.
node_groups = copy.deepcopy(node_groups) # Make a copy since later we may be altering the dictionary
selected_nodes = np.array(node_groups[0]['node_ids'])
for grp in node_groups[1:]:
if 'node_ids' not in grp:
raise AttributeError('Could not find "node_ids" key in node_groups parameter.')
selected_nodes = np.concatenate((selected_nodes, np.array(grp['node_ids'])))
return node_groups, selected_nodes
def plot_raster(spike_trains, with_histogram=True, population=None, node_groups=None, times=None, title=None,
show=True, save_as=None):
"""will create a raster plot (plus optional histogram) from a SpikeTrains object or SONATA Spike-Trains file. Will
return the figure
By default will display all nodes, if you want to only display a subset of nodes and/or group together different
nodes (by node_id) by dot colors and labels then you can use the node_groups, which should be a list of dicts::
plot_raster('/path/to/my/spike.h5',
node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'}, # first 70 nodes are blue pyr cells
{'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}]) # last 30 nodes are red inh cells
The histogram will not be grouped.
:param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
:param with_histogram: If True the a histogram will be shown as a small subplot below the scatter plot. Default
True.
:param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
nodes to actually plot. If only one population exists and population=None then the function will find it by
default.
:param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
label and color. If None all nodes will be labeled and colored the same.
:param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
data.
:param title: str, Use to add a title. Default no tile
:param show: bool to display or not display plot. default True.
:param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
save plot.
:return: matplotlib figure.Figure object
"""
spike_trains = __get_spike_trains(spike_trains=spike_trains)
pop = __get_population(spike_trains=spike_trains, population=population)
node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
# Only show a legend if one of the node_groups have an explicit label, otherwise matplotlib will show an empty
# legend box which looks bad
show_legend = False
    # If the last (or first) M nodes don't spike, matplotlib would cut off the y range, but they should still
    # show as empty rows. To handle this, keep track of the full range of node_ids.
min_id, max_id = np.inf, -1
spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
if times is not None:
min_ts, max_ts = times[0], times[1]
spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
else:
min_ts = np.min(spikes_df['timestamps'])
max_ts = np.max(spikes_df['timestamps'])
    # Determine the subplot layout (raster plus an optional histogram underneath)
if with_histogram:
fig, axes = plt.subplots(2, 1, gridspec_kw={'height_ratios': [7, 1]}, squeeze=True)
raster_axes = axes[0]
bottom_axes = hist_axes = axes[1]
else:
fig, axes = plt.subplots(1, 1)
bottom_axes = raster_axes = axes
hist_axes = None
for node_grp in node_groups:
grp_ids = node_grp.pop('node_ids')
grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
        # If a label exists for at least one group we want to show the legend
show_legend = show_legend or 'label' in node_grp
# Finds min/max node_id for all node groups
min_id = np.min([np.min(grp_ids), min_id])
max_id = np.max([np.max(grp_ids), max_id])
raster_axes.scatter(grp_spikes['timestamps'], grp_spikes['node_ids'], lw=0, s=8, **node_grp)
if show_legend:
raster_axes.legend(loc='upper right')
if title:
raster_axes.set_title(title)
raster_axes.set_ylabel('node_ids')
raster_axes.set_ylim(min_id - 0.5, max_id + 1) # add buffering to range else the rows at the ends look cut-off.
raster_axes.set_xlim(min_ts, max_ts + 1)
bottom_axes.set_xlabel('timestamps ({})'.format(spike_trains.units(population=pop)))
if with_histogram:
        # Add a histogram if necessary
hist_axes.hist(spikes_df['timestamps'], 100)
hist_axes.set_xlim(min_ts - 0.5, max_ts + 1)
hist_axes.axes.get_yaxis().set_visible(False)
raster_axes.set_xticks([])
if save_as:
plt.savefig(save_as)
if show:
plt.show()
return fig
def moving_average(data, window_size=10):
h = int(window_size / 2)
x_max = len(data)
return [np.mean(data[max(0, x - h):min(x_max, x + h)]) for x in range(0, x_max)]
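# Illustrative usage (not part of the original module): the smoothing hook in plot_rates
# below accepts either smoothing=True (which uses moving_average above) or any callable,
# with keyword arguments forwarded via smoothing_params, e.g.
#
#   plot_rates('spikes.h5', smoothing=True, smoothing_params={'window_size': 20})
#   plot_rates('spikes.h5', smoothing=moving_average, smoothing_params={'window_size': 5})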
def plot_rates(spike_trains, population=None, node_groups=None, times=None, smoothing=False,
smoothing_params=None, title=None, show=True, save_as=None):
"""Calculate and plot the rates of each node in a SpikeTrains object or SONATA Spike-Trains file. If start and stop
times are not specified from the "times" parameter, will try to parse values from the timestamps data.
If you want to only display a subset of nodes and/or group together different nodes (by node_id) by dot colors and
labels then you can use the node_groups, which should be a list of dicts::
plot_rates('/path/to/my/spike.h5',
node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'},
{'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}])
:param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
:param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
nodes to actually plot. If only one population exists and population=None then the function will find it by
default.
:param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
label and color. If None all nodes will be labeled and colored the same.
:param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
data.
    :param smoothing: Bool or function. Used to smooth the data. By default (False) no smoothing will be done. If True
        a moving-average smoothing function will be used; alternatively pass a function pointer.
:param smoothing_params: dict, parameters when using a function pointer smoothing value.
:param title: str, Use to add a title. Default no tile
:param show: bool to display or not display plot. default True.
:param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
save plot.
:return: matplotlib figure.Figure object
"""
spike_trains = __get_spike_trains(spike_trains=spike_trains)
pop = __get_population(spike_trains=spike_trains, population=population)
node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
# Determine if smoothing will be applied to the data
smoothing_params = smoothing_params or {} # pass in empty parameters
if isinstance(smoothing, types.FunctionType):
smoothing_fnc = partial(smoothing, **smoothing_params)
elif smoothing:
smoothing_fnc = partial(moving_average, **smoothing_params)
else:
smoothing_fnc = lambda d: d # Use a filler function that won't do anything
# get data
spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
if times is not None:
recording_interval = times[1] - times[0]
spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
else:
recording_interval = np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])
# Iterate through each group of nodes and add to the same plot
fig, axes = plt.subplots()
show_legend = False # Only show labels if one of the node group has label value
for node_grp in node_groups:
show_legend = show_legend or 'label' in node_grp # If label exists for at-least one group we want to show
grp_ids = node_grp.pop('node_ids')
grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
spike_rates = grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)
axes.plot(np.array(spike_rates.index), smoothing_fnc(spike_rates), '.', **node_grp)
axes.set_ylabel('Firing Rates (Hz)')
axes.set_xlabel('node_ids')
if show_legend:
        axes.legend()  # loc='upper right'
if title:
axes.set_title(title)
if save_as:
plt.savefig(save_as)
if show:
plt.show()
return fig
def plot_rates_boxplot(spike_trains, population=None, node_groups=None, times=None, title=None, show=True,
save_as=None):
"""Creates a box plot of the firing rates taken from a SpikeTrains object or SONATA Spike-Trains file. If start
and stop times are not specified from the "times" parameter, will try to parse values from the timestamps data.
By default will plot all nodes together. To only display a subset of the nodes and/or create groups of nodes use
the node_groups options::
plot_rates_boxplot(
'/path/to/my/spike.h5',
node_groups=[{'node_ids': range(0, 70), 'label': 'pyr'},
{'node_ids': range(70, 100), 'label': 'inh'}]
)
:param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
:param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
nodes to actually plot. If only one population exists and population=None then the function will find it by
default.
:param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
label and color. If None all nodes will be labeled and colored the same.
    :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
        data.
    :param title: str, Use to add a title. Default no title.
    :param show: bool to display or not display plot. Default True.
:param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
save plot.
:return: matplotlib figure.Figure object
"""
spike_trains = __get_spike_trains(spike_trains=spike_trains)
pop = __get_population(spike_trains=spike_trains, population=population)
node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
if times is not None:
recording_interval = times[1] - times[0]
spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
else:
recording_interval = np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])
fig, axes = plt.subplots()
rates_data = []
rates_labels = []
if len(node_groups) == 1 and 'label' not in node_groups[0]:
node_groups[0]['label'] = 'All Nodes'
for i, node_grp in enumerate(node_groups):
rates_labels.append(node_grp.get('label', 'Node Group {}'.format(i)))
grp_ids = node_grp.pop('node_ids')
grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
spike_rates = grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)
rates_data.append(spike_rates)
axes.boxplot(rates_data)
axes.set_ylabel('Firing Rates (Hz)')
axes.set_xticklabels(rates_labels)
if title:
axes.set_title(title)
if save_as:
plt.savefig(save_as)
if show:
plt.show()
return fig | 47.639296 | 120 | 0.697999 | [
"BSD-3-Clause"
] | chenziao/bmtk | bmtk/utils/reports/spike_trains/plotting.py | 16,245 | Python |
import time
import pygame
def manual_control(**kwargs):
from .knights_archers_zombies import env as _env
env = _env(**kwargs)
env.reset()
done = False
cur_agent = 0
frame_count = 0
# frame_limit = 500
quit_game = 0
while not done:
        # while frame_count < frame_limit: # Uncomment this if you want the game to run for frame_limit frames instead of ending by normal game conditions (useful for testing purposes)
agents = env.agents
frame_count += 1
actions = [5 for x in range(len(env.agents))] # If you want to do manual input
# 5 is do nothing, 0 is up, 1 is down, 2 is turn CW, 3 is CCW, 4 is attack
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
quit_game = 1
break
if event.key == pygame.K_BACKSPACE:
# Backspace to reset
env.reset()
# totalReward = 0
if event.key == pygame.K_a:
cur_agent -= 1
if cur_agent < 0:
cur_agent = len(agents) - 1
if event.key == pygame.K_d:
cur_agent += 1
if cur_agent > len(agents) - 1:
cur_agent = 0
if event.key == pygame.K_q:
actions[cur_agent] = 2
if event.key == pygame.K_e:
actions[cur_agent] = 3
if event.key == pygame.K_w:
actions[cur_agent] = 0
if event.key == pygame.K_s:
actions[cur_agent] = 1
if event.key == pygame.K_f:
actions[cur_agent] = 4
if quit_game:
break
for a in actions:
env.step(a)
env.render()
done = any(env.dones.values())
env.close()
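# Usage sketch: run the interactive loop directly; any keyword arguments are
# forwarded unchanged to the knights_archers_zombies env constructor.
#
#     if __name__ == "__main__":
#         manual_control()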
| 34.275862 | 193 | 0.492455 | [
"MIT"
] | AnanthHari/PettingZoo | pettingzoo/gamma/knights_archers_zombies/manual_control.py | 1,988 | Python |
import sys, getopt
from .summarizer import summarize
from .keywords import keywords
# Types of summarization
SENTENCE = 0
WORD = 1
def exit_with_error(err):
print("Error: " + str(err))
usage()
sys.exit(2)
def get_arguments():
try:
opts, args = getopt.getopt(sys.argv[1:], "t:s:r:w:h", ["text=", "summary=", "ratio=", "words=", "help"])
except getopt.GetoptError as err:
exit_with_error(err)
path = None
summarize_by = SENTENCE
ratio = 0.2
words = None
for o, a in opts:
if o in ("-t", "--text"):
path = a
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-s", "--summary"):
summarize_by = int(a)
elif o in ("-w", "--words"):
words = int(a)
elif o in ("-r", "--ratio"):
ratio = float(a)
else:
assert False, "unhandled option"
if path is None:
exit_with_error("-t option is required.")
return path, summarize_by, ratio, words
help_text = """Usage: textrank -t FILE
-s UNIT, --summary=UNIT:
\tType of unit to summarize: sentence (0) or word (1). Default value: 0
\t0: Sentence. 1: Word
-t FILE, --text=FILE:
\tPATH to text to summarize
-r RATIO, --ratio=RATIO:
\tFloat number (0,1] that defines the length of the summary. It's a proportion of the original text. Default value: 0.2.
-w WORDS, --words=WORDS:
\tNumber to limit the length of the summary. The length option is ignored if the word limit is set.
-h, --help:
\tprints this help
"""
def usage():
print(help_text)
def textrank(text, summarize_by=SENTENCE, ratio=0.2, words=None):
if summarize_by == SENTENCE:
return summarize(text, ratio, words)
else:
return keywords(text, ratio, words)
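# Usage sketch (programmatic use from within this module; ``long_text`` is a
# placeholder string supplied by the caller):
#
#     summary = textrank(long_text, summarize_by=SENTENCE, ratio=0.2)
#     top_keywords = textrank(long_text, summarize_by=WORD, words=10)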
def main():
path, summarize_by, ratio, words = get_arguments()
with open(path) as file:
text = file.read()
print(textrank(text, summarize_by, ratio, words))
if __name__ == "__main__":
main()
| 24.597561 | 120 | 0.600892 | [
"MIT"
] | LShostenko/textrank | summa/textrank.py | 2,017 | Python |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import datetime
import re
import logging
from django.conf import settings
from django.utils import translation, timezone
from django.utils.translation import ugettext_lazy as _
from pipeline.core.flow.activity import Service, StaticIntervalGenerator
from pipeline.core.flow.io import StringItemSchema, ObjectItemSchema
from pipeline.component_framework.component import Component
from gcloud.core.models import Project
__group_name__ = _("蓝鲸服务(BK)")
LOGGER = logging.getLogger("celery")
class PauseService(Service):
__need_schedule__ = True
def execute(self, data, parent_data):
return True
def schedule(self, data, parent_data, callback_data=None):
if callback_data is not None:
data.outputs.callback_data = callback_data
self.finish_schedule()
return True
def inputs_format(self):
return [
self.InputItem(
name=_("描述"), key="description", type="string", schema=StringItemSchema(description=_("描述")),
)
]
def outputs_format(self):
return [
self.OutputItem(
name=_("API回调数据"),
key="callback_data",
type="object",
schema=ObjectItemSchema(description=_("通过node_callback API接口回调并传入数据,支持dict数据"), property_schemas={},),
),
]
class PauseComponent(Component):
name = _("暂停")
code = "pause_node"
bound_service = PauseService
form = settings.STATIC_URL + "components/atoms/bk/pause.js"
desc = _("该节点可以通过node_callback API接口进行回调并传入数据,callback_data参数为dict类型,回调数据会作为该节点的输出数据")
class SleepTimerService(Service):
__need_schedule__ = True
interval = StaticIntervalGenerator(0)
    # Regex matching a full date and time of day (yyyy-MM-dd HH:mm:ss)
date_regex = re.compile(
r"%s %s"
% (
r"^(((\d{3}[1-9]|\d{2}[1-9]\d{1}|\d{1}[1-9]\d{2}|[1-9]\d{3}))|"
r"(29/02/((\d{2})(0[48]|[2468][048]|[13579][26])|((0[48]|[2468][048]|[3579][26])00))))-"
r"((0[13578]|1[02])-((0[1-9]|[12]\d|3[01]))|"
r"((0[469]|11)-(0[1-9]|[12]\d|30))|(02)-(0[1-9]|[1]\d|2[0-8]))",
r"((0|[1])\d|2[0-3]):(0|[1-5])\d:(0|[1-5])\d$",
)
)
seconds_regex = re.compile(r"^\d{1,8}$")
def inputs_format(self):
return [
self.InputItem(
name=_("定时时间"),
key="bk_timing",
type="string",
schema=StringItemSchema(description=_("定时时间,格式为秒(s) 或 (%%Y-%%m-%%d %%H:%%M:%%S)")),
),
self.InputItem(
name=_("是否强制晚于当前时间"),
key="force_check",
type="bool",
schema=StringItemSchema(description=_("用户输入日期格式时是否强制要求时间晚于当前时间,只对日期格式定时输入有效")),
),
]
def outputs_format(self):
return []
def execute(self, data, parent_data):
if parent_data.get_one_of_inputs("language"):
translation.activate(parent_data.get_one_of_inputs("language"))
timing = data.get_one_of_inputs("bk_timing")
force_check = data.get_one_of_inputs("force_check", True)
        # Get the project's time zone
project = Project.objects.get(id=parent_data.inputs.project_id)
project_tz = timezone.pytz.timezone(project.time_zone)
data.outputs.business_tz = project_tz
now = datetime.datetime.now(tz=project_tz)
if self.date_regex.match(str(timing)):
eta = project_tz.localize(datetime.datetime.strptime(timing, "%Y-%m-%d %H:%M:%S"))
if force_check and now > eta:
message = _("定时时间需晚于当前时间")
data.set_outputs("ex_data", message)
return False
elif self.seconds_regex.match(str(timing)):
            # Writing this as '+' would allow arbitrarily long input; alternatively consider validating on the frontend
eta = now + datetime.timedelta(seconds=int(timing))
else:
message = _("输入参数%s不符合【秒(s) 或 时间(%%Y-%%m-%%d %%H:%%M:%%S)】格式") % timing
data.set_outputs("ex_data", message)
return False
self.logger.info("wake time: {}".format(eta))
data.outputs.timing_time = eta
return True
def schedule(self, data, parent_data, callback_data=None):
timing_time = data.outputs.timing_time
business_tz = data.outputs.business_tz
now = datetime.datetime.now(tz=business_tz)
t_delta = timing_time - now
if t_delta.total_seconds() < 1:
self.finish_schedule()
        # Subtract 0.5 s here to offset, as far as possible, the delay introduced by execute()
self.interval.interval = t_delta.total_seconds() - 0.5
return True
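# Sketch of the two accepted ``bk_timing`` formats, checked against the class
# regexes above (a relative number of seconds, or an absolute local datetime):
#
#     assert SleepTimerService.seconds_regex.match("300")
#     assert SleepTimerService.date_regex.match("2022-01-01 08:30:00")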
class SleepTimerComponent(Component):
name = _("定时")
code = "sleep_timer"
bound_service = SleepTimerService
form = settings.STATIC_URL + "components/atoms/bk/timer.js"
desc = _("最长定时时间受到环境配置影响,具体时长请咨询系统管理员")
| 34.974843 | 118 | 0.621471 | [
"Apache-2.0"
] | 64761294/bk-sops | pipeline_plugins/components/collections/controller.py | 6,061 | Python |
from typing import List
from typing import Tuple
from pcapi.core import mails
from pcapi.core.bookings import constants as booking_constants
from pcapi.core.bookings.models import Booking
from pcapi.core.categories import subcategories
from pcapi.core.mails.models.sendinblue_models import SendinblueTransactionalEmailData
from pcapi.core.mails.transactional.sendinblue_template_ids import TransactionalEmail
from pcapi.core.users.models import User
def get_expired_bookings_to_beneficiary_data(
beneficiary: User, bookings: list[Booking], withdrawal_period: int
) -> SendinblueTransactionalEmailData:
return SendinblueTransactionalEmailData(
template=TransactionalEmail.EXPIRED_BOOKING_TO_BENEFICIARY.value,
params={
"FIRSTNAME": beneficiary.firstName,
"BOOKINGS": _extract_bookings_information_from_bookings_list(bookings),
"WITHDRAWAL_PERIOD": withdrawal_period,
},
)
def _extract_bookings_information_from_bookings_list(bookings: list[Booking]) -> list[dict]:
bookings_info = []
for booking in bookings:
stock = booking.stock
offer = stock.offer
bookings_info.append(
{
"offer_name": offer.name,
"venue_name": offer.venue.publicName if offer.venue.publicName else offer.venue.name,
}
)
return bookings_info
def send_expired_bookings_to_beneficiary_email(beneficiary: User, bookings: list[Booking]) -> bool:
success = True
books_bookings, other_bookings = _filter_books_bookings(bookings)
if books_bookings:
books_bookings_data = get_expired_bookings_to_beneficiary_data(
beneficiary, books_bookings, booking_constants.BOOKS_BOOKINGS_AUTO_EXPIRY_DELAY.days
)
success &= mails.send(recipients=[beneficiary.email], data=books_bookings_data)
if other_bookings:
other_bookings_data = get_expired_bookings_to_beneficiary_data(
beneficiary, other_bookings, booking_constants.BOOKINGS_AUTO_EXPIRY_DELAY.days
)
success &= mails.send(recipients=[beneficiary.email], data=other_bookings_data)
return success
def _filter_books_bookings(bookings: list[Booking]) -> Tuple[List[Booking], List[Booking]]:
books_bookings = []
other_bookings = []
for booking in bookings:
if booking.stock.offer.subcategoryId == subcategories.LIVRE_PAPIER.id:
books_bookings.append(booking)
else:
other_bookings.append(booking)
return books_bookings, other_bookings
| 36.7 | 101 | 0.735306 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | adriengraux/pass-culture-api | src/pcapi/core/mails/transactional/bookings/booking_expiration_to_beneficiary.py | 2,569 | Python |
__version__ = "0.1.0"
import mmap
import os
# from .ext import load_file, parse
#-------------------------------------------------------------------------------
def parse_file(path, **kw_args):
fd = os.open(path, os.O_RDONLY)
try:
map = mmap.mmap(fd, 0, prot=mmap.PROT_READ)
return parse(map, **kw_args)
finally:
os.close(fd)
| 19.473684 | 80 | 0.475676 | [
"MIT"
] | alexhsamuel/fixprs | src/fixprs/__init__.py | 370 | Python |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Osyris contributors (https://github.com/nvaytet/osyris)
# flake8: noqa
from .array import Array
from .datagroup import Datagroup
from .dataset import Dataset
from .plot import Plot
from .ism_physics import get_eos, get_opacities
| 26.545455 | 76 | 0.791096 | [
"BSD-3-Clause"
] | Adnan-Ali-Ahmad/osyris | src/osyris/core/__init__.py | 292 | Python |
import re
import shutil
import os
class FileRenamer:
def __init__(self, text):
self.text = text
self.processed = self.text
def reset(self):
self.processed = self.text
def replace_tag(self, tag, value):
if value is None:
value = ''
        pattern = re.compile(r'\$\{([-_ ]*)' + tag + r'([-_ ]*)\}')
result = re.findall(pattern, self.text)
if len(result) == 0:
value = ''
else:
prepend = result[0][0]
append = result[0][1]
        pattern = re.compile(r'(\$\{[-_ ]*' + tag + r'[-_ ]*\})')
result = re.findall(pattern, self.text)
if len(result) == 0:
return
if value == '':
self.processed = self.processed.replace(result[0], '')
else:
self.processed = self.processed.replace(
result[0], prepend + value + append)
def rename_file(self, file, track, release, artist):
extension = file.split('.')[-1]
self.replace_tag('d', track['disc'])
self.replace_tag('dt', track['disctotal'])
self.replace_tag('n', track['number'])
self.replace_tag('nt', track['tracktotal'])
self.replace_tag('t', track['title'])
self.replace_tag('a', artist.name)
self.replace_tag('b', release.album_artist)
dirname = os.path.dirname(file)
if dirname == '':
dirname = '.' + os.sep
shutil.move(file, dirname + os.sep + self.processed + '.' + extension)
self.reset()
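# Usage sketch (hypothetical template and tag values): separators written inside
# a ${...} placeholder are kept only when the tag resolves to a value.
#
#     renamer = FileRenamer("${n - }${t}")
#     renamer.replace_tag('n', '01')
#     renamer.replace_tag('t', 'Some Title')
#     print(renamer.processed)  # -> "01 - Some Title"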
| 32.25 | 78 | 0.527778 | [
"MIT"
] | makzyt4/discogs-tagger | discogstagger/renamer.py | 1,548 | Python |
from filelock import FileLock
import torch
import pandas as pd
from .tools import BuildingTemperatureDataset
import os
def train(model, device, dataloader, optimizer):
"""
Performs one epoch of training.
    Order of rooms in building and in data must match, otherwise the model will fit the wrong rooms to the data.
"""
model.reset_iv() # Reset initial value
model.train()
model.cooling_policy.eval()
# Stops Autograd endlessly keeping track of the graph. Memory Leak!
for layer in model.cooling_policy.parameters():
layer.requires_grad = False
num_cols = len(model.building.rooms) # number of columns to use from data.
num_batches = len(dataloader)
train_loss = 0
loss_fn = torch.nn.MSELoss()
for batch, (time, temp) in enumerate(dataloader):
time, temp = time.to(device), temp.to(device) # Put on GPU if available
# Get model arguments:
time = time.squeeze(0)
temp = temp.squeeze(0)
# Compute prediction and loss
pred = model(time)
pred = pred.squeeze(-1) # change from column to row matrix
loss = loss_fn(pred[:, 2:], temp[:, 0:num_cols])
train_loss += loss.item()
# get last output and use for next initial value
model.iv = pred[-1, :].unsqueeze(1).detach() # MUST DETACH GRAD
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
return train_loss / num_batches
def test(model, device, dataloader):
model.reset_iv() # Reset initial value
model.eval() # Put model in evaluation mode
num_batches = len(dataloader)
num_cols = len(model.building.rooms) # number of columns to take from data.
test_loss = 0
loss_fn = torch.nn.MSELoss()
with torch.no_grad():
for (time, temp) in dataloader:
time, temp = time.to(device), temp.to(device) # Put on GPU if available
time = time.squeeze(0)
temp = temp.squeeze(0)
pred = model(time)
pred = pred.squeeze(-1) # change from column to row matrix
test_loss += loss_fn(pred[:, 2:], temp[:, 0:num_cols]).item()
# get last output and use for next initial value
model.iv = pred[-1, :].unsqueeze(1).detach() # MUST DETACH GRAD
test_loss /= num_batches
return test_loss
def dataset_creator(path, sample_size, dt):
path_sorted = sort_data(path, dt)
with FileLock(f"{os.path.dirname(os.path.abspath(path_sorted))}.lock"):
training_data = BuildingTemperatureDataset(path_sorted, sample_size, train=True)
train_dataloader = torch.utils.data.DataLoader(training_data, batch_size=1, shuffle=False)
test_data = BuildingTemperatureDataset(path_sorted, sample_size, test=True)
test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False)
return train_dataloader, test_dataloader
def sort_data(path, dt):
"""
Check if path has sorted data tag (_sorted)
If not check if data has previously been sorted and exists in the directory.
Check to see if the value dt is correct
If not sort data and write filename_sorted.csv
data is sorted by time in ascending order and downsampled to a frequency of dt seconds.
Missing values are interpolated.
    A 'date-time' column is also inserted.
"""
def sort(path, dt):
df = pd.read_csv(path)
if path[-11:] == '_sorted.csv':
path_sorted = path
else:
path_sorted = path[:-4] + '_sorted.csv'
# Sort df by time (raw data not always in order)
df = df.sort_values(by=["time"], ascending=True)
# insert date-time value at start of df
try:
df.insert(loc=0, column='date-time', value=pd.to_datetime(df['time'], unit='ms'))
except ValueError:
raise ValueError('Data appears to have already been sorted. Check if still appropriate and add _sorted.csv tag to avoid this error.')
        # downsample data to a frequency of dt (seconds) using the mean value, rounded to 2 d.p.
df = df.set_index('date-time').resample(str(dt) + 's').mean().round(2)
# time column is converted to unix epoch seconds to match the date-time
df["time"] = (df.index - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")
# change date-time from UTC to Local time
df = df.tz_localize('Europe/London')
df = df.interpolate().round(2) # interpolate missing values NaN
df.to_csv(path_sorted, index=True)
def need_to_sort(path, dt):
def get_dt(path):
df_dt = pd.read_csv(path)['time'][0:2].values
return df_dt[1] - df_dt[0]
# Does path already have sorted tag?
if path[-11:] == '_sorted.csv':
# if so, is dt correct?
if get_dt(path) == dt:
return False # path and file is correct dont sort
else:
return True # dt is wrong, re-sort
# path does not contain _sorted.csv
else:
# Does path_sorted exist?
path_sorted = path[:-4] + '_sorted.csv'
import os.path
if os.path.isfile(path_sorted): # check if file already exists
# if file exists check if dt is correct
if get_dt(path_sorted) == dt:
return False # correct file already exists don't sort
else:
return True # file exists but dt wrong, re-sort
else: # File doesn't exist
return True
if need_to_sort(path, dt):
sort(path, dt)
# return the path_sorted
if path[-11:] == '_sorted.csv':
path_sorted = path
else:
path_sorted = path[:-4] + '_sorted.csv'
return path_sorted
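# Usage sketch (hypothetical path): resample the raw CSV to 30 s intervals,
# writing 'data/rooms_sorted.csv' if needed and returning that path.
#
#     path_sorted = sort_data('data/rooms.csv', dt=30)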
class OptimiseRC:
"""
Parameters
----------
model : object
RCModel class object.
csv_path : string
Path to .csv file containing room temperature data.
Data will be sorted if not done already and saved to a new file with the tag '_sorted'
sample_size : int
Length of indexes to sample from dataset per batch.
dt : int
Timestep data will be resampled to.
lr : float
Learning rate for optimiser.
    opt_id : int
Unique identifier used when optimising multiple models.
see https://docs.ray.io/en/latest/using-ray-with-pytorch.html
"""
def __init__(self, model, csv_path, sample_size, dt=30, lr=1e-3, opt_id=0):
self.model = model
self.model.init_params() # randomise parameters
self.model_id = opt_id
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.train_dataloader, self.test_dataloader = dataset_creator(csv_path, int(sample_size), int(dt))
self.optimizer = torch.optim.Adam([self.model.params, self.model.cooling], lr=lr)
def train(self):
avg_loss = train(self.model, self.device, self.train_dataloader, self.optimizer)
return avg_loss
def test(self):
test_loss = test(self.model, self.device, self.test_dataloader)
return test_loss
def train_loop(self, epochs):
print(self.model.params)
for i in range(int(epochs)):
# print(f"Epoch {i + 1}\n-------------------------------")
testloss = self.train()
results = [testloss, self.model]
return results
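# Usage sketch (``model`` is a hypothetical RCModel instance and the CSV path is
# made up); train_loop returns the latest training loss and the fitted model.
#
#     opt = OptimiseRC(model, 'data/rooms.csv', sample_size=2880, dt=30, lr=1e-3)
#     final_loss, fitted_model = opt.train_loop(epochs=10)
#     print('test loss:', opt.test())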
| 32.175966 | 145 | 0.620381 | [
"MIT"
] | BFourcin/rcmodel | src/rcmodel/optimisation.py | 7,497 | Python |
from .delayeddataframe import DelayedDataFrame # noqa:F401
from . import annotator # noqa: F401
from . import regions
from . import genes
__version__ = '0.4'
_all__ = [DelayedDataFrame, annotator, regions, genes, __version__]
| 23.1 | 67 | 0.761905 | [
"MIT"
] | IMTMarburg/mbf_genomics | src/mbf_genomics/__init__.py | 231 | Python |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import json
import unittest
from unittest import mock
import pandas as pd
import airflow.providers.amazon.aws.operators.hive_to_dynamodb
from airflow import DAG
from airflow.providers.amazon.aws.hooks.aws_dynamodb_hook import AwsDynamoDBHook
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class TestHiveToDynamoDBTransferOperator(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.sql = 'SELECT 1'
self.hook = AwsDynamoDBHook(
aws_conn_id='aws_default', region_name='us-east-1')
@staticmethod
def process_data(data, *args, **kwargs):
return json.loads(data.to_json(orient='records'))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default')
self.assertIsNotNone(hook.get_conn())
@mock.patch('airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_records_with_schema(self, mock_get_pandas_df):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'id',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.providers.amazon.aws.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name="test_airflow",
task_id='hive_to_dynamodb_check',
table_keys=['id'],
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
@mock.patch('airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_pre_process_records_with_schema(self, mock_get_pandas_df):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'id',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.providers.amazon.aws.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name='test_airflow',
task_id='hive_to_dynamodb_check',
table_keys=['id'],
pre_process=self.process_data,
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
if __name__ == '__main__':
unittest.main()
| 34.896552 | 106 | 0.629051 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | JCoder01/airflow | tests/providers/amazon/aws/operators/test_hive_to_dynamodb.py | 5,060 | Python |
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST.
# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto
# for the underlying protos mentioned in this file. See
# https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax.
from collections import defaultdict
import json
import functools
import os
import pathlib
import re
import string
import sys
from google.protobuf import json_format
from bazel_tools.tools.python.runfiles import runfiles
import yaml
# We have to do some evil things to sys.path due to the way that Python module
# resolution works; we have both tools/ trees in bazel_tools and envoy. By
# default, Bazel leaves us with a sys.path in which the @bazel_tools repository
# takes precedence. Now that we're done with importing runfiles above, we can
# just remove it from the sys.path.
sys.path = [p for p in sys.path if not p.endswith('bazel_tools')]
from tools.api_proto_plugin import annotations
from tools.api_proto_plugin import plugin
from tools.api_proto_plugin import visitor
from tools.config_validation import validate_fragment
from tools.protodoc import manifest_pb2
from udpa.annotations import security_pb2
from udpa.annotations import status_pb2
from validate import validate_pb2
# Namespace prefix for Envoy core APIs.
ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.'
# Namespace prefix for Envoy top-level APIs.
ENVOY_PREFIX = '.envoy.'
# Namespace prefix for WKTs.
WKT_NAMESPACE_PREFIX = '.google.protobuf.'
# Namespace prefix for RPCs.
RPC_NAMESPACE_PREFIX = '.google.rpc.'
# http://www.fileformat.info/info/unicode/char/2063/index.htm
UNICODE_INVISIBLE_SEPARATOR = u'\u2063'
# Template for data plane API URLs.
DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format(
os.environ['ENVOY_BLOB_SHA'])
# Template for formatting extension descriptions.
EXTENSION_TEMPLATE = string.Template("""$anchor
This extension may be referenced by the qualified name *$extension*
.. note::
$status
$security_posture
""")
# Template for formatting an extension's category or categories.
EXTENSION_CATEGORIES_TEMPLATE = string.Template("""
.. tip::
$message:
$categories
""")
# Template for formatting an extension category.
EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor
.. tip::
This extension category has the following known extensions:
$extensions
""")
# A map from the extension security postures (as defined in the
# envoy_cc_extension build macro) to human readable text for extension docs.
EXTENSION_SECURITY_POSTURES = {
'robust_to_untrusted_downstream':
'This extension is intended to be robust against untrusted downstream traffic. It '
'assumes that the upstream is trusted.',
'robust_to_untrusted_downstream_and_upstream':
'This extension is intended to be robust against both untrusted downstream and '
'upstream traffic.',
'requires_trusted_downstream_and_upstream':
'This extension is not hardened and should only be used in deployments'
' where both the downstream and upstream are trusted.',
'unknown':
'This extension has an unknown security posture and should only be '
'used in deployments where both the downstream and upstream are '
'trusted.',
'data_plane_agnostic':
'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.',
}
# A map from the extension status value to a human readable text for extension
# docs.
EXTENSION_STATUS_VALUES = {
'alpha':
'This extension is functional but has not had substantial production burn time, use only with this caveat.',
'wip':
'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.',
}
EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text())
# create an index of extension categories from extension db
EXTENSION_CATEGORIES = {}
for _k, _v in EXTENSION_DB.items():
for _cat in _v['categories']:
EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k)
class ProtodocError(Exception):
"""Base error class for the protodoc module."""
def HideNotImplemented(comment):
"""Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?"""
return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations
def GithubUrl(type_context):
"""Obtain data plane API Github URL by path from a TypeContext.
Args:
type_context: type_context.TypeContext for node.
Returns:
A string with a corresponding data plane API GitHub Url.
"""
if type_context.location is not None:
return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name,
type_context.location.span[0])
return ''
def FormatCommentWithAnnotations(comment, type_name=''):
"""Format a comment string with additional RST for annotations.
Args:
comment: comment string.
type_name: optional, 'message' or 'enum' may be specified for additional
message/enum specific annotations.
Returns:
A string with additional RST from annotations.
"""
formatted_extension = ''
if annotations.EXTENSION_ANNOTATION in comment.annotations:
extension = comment.annotations[annotations.EXTENSION_ANNOTATION]
formatted_extension = FormatExtension(extension)
formatted_extension_category = ''
if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations:
formatted_extension_category = FormatExtensionCategory(
comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION])
comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n')
return comment + formatted_extension + formatted_extension_category
def MapLines(f, s):
"""Apply a function across each line in a flat string.
Args:
f: A string transform function for a line.
s: A string consisting of potentially multiple lines.
Returns:
A flat string with f applied to each line.
"""
return '\n'.join(f(line) for line in s.split('\n'))
def Indent(spaces, line):
"""Indent a string."""
return ' ' * spaces + line
def IndentLines(spaces, lines):
"""Indent a list of strings."""
return map(functools.partial(Indent, spaces), lines)
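# Sketch: MapLines(lambda l: '# ' + l, 'x\ny') == '# x\n# y' and
# list(IndentLines(2, ['a', 'b'])) == ['  a', '  b'].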
def FormatInternalLink(text, ref):
return ':ref:`%s <%s>`' % (text, ref)
def FormatExternalLink(text, ref):
return '`%s <%s>`_' % (text, ref)
def FormatHeader(style, text):
"""Format RST header.
Args:
style: underline style, e.g. '=', '-'.
text: header text
Returns:
RST formatted header.
"""
return '%s\n%s\n\n' % (text, style * len(text))
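# Sketch: FormatHeader('=', 'Title') returns 'Title\n=====\n\n'.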
def FormatExtension(extension):
"""Format extension metadata as RST.
Args:
extension: the name of the extension, e.g. com.acme.foo.
Returns:
RST formatted extension description.
"""
try:
extension_metadata = EXTENSION_DB[extension]
anchor = FormatAnchor('extension_' + extension)
status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '')
security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']]
extension = EXTENSION_TEMPLATE.substitute(anchor=anchor,
extension=extension,
status=status,
security_posture=security_posture)
categories = FormatExtensionList(extension_metadata["categories"], "extension_category")
cat_or_cats = "categories" if len(categories) > 1 else "category"
category_message = f"This extension extends and can be used with the following extension {cat_or_cats}"
extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message,
categories=categories)
return f"{extension}\n\n{extension_category}"
except KeyError as e:
sys.stderr.write(
'\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n')
exit(1) # Raising the error buries the above message in tracebacks.
def FormatExtensionList(items, prefix="extension", indent=2):
indent = " " * indent
formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items)
return f"{formatted_list}\n{indent}\n"
def FormatExtensionCategory(extension_category):
"""Format extension metadata as RST.
Args:
extension_category: the name of the extension_category, e.g. com.acme.
Returns:
RST formatted extension category description.
"""
try:
extensions = EXTENSION_CATEGORIES[extension_category]
except KeyError as e:
raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n")
anchor = FormatAnchor('extension_category_' + extension_category)
extensions = FormatExtensionList(sorted(extensions))
return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions)
def FormatHeaderFromFile(style, source_code_info, proto_name):
"""Format RST header based on special file level title
Args:
style: underline style, e.g. '=', '-'.
source_code_info: SourceCodeInfo object.
proto_name: If the file_level_comment does not contain a user specified
title, use this as page title.
Returns:
RST formatted header, and file level comment without page title strings.
"""
anchor = FormatAnchor(FileCrossRefLabel(proto_name))
stripped_comment = annotations.WithoutAnnotations(
StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments)))
formatted_extension = ''
if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations:
extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION]
formatted_extension = FormatExtension(extension)
if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations:
return anchor + FormatHeader(
style, source_code_info.file_level_annotations[
annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment
return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment
def FormatFieldTypeAsJson(type_context, field):
"""Format FieldDescriptorProto.Type as a pseudo-JSON string.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return: RST formatted pseudo-JSON string representation of field type.
"""
if TypeNameFromFQN(field.type_name) in type_context.map_typenames:
return '"{...}"'
if field.label == field.LABEL_REPEATED:
return '[]'
if field.type == field.TYPE_MESSAGE:
return '"{...}"'
return '"..."'
def FormatMessageAsJson(type_context, msg):
"""Format a message definition DescriptorProto as a pseudo-JSON block.
Args:
type_context: contextual information for message/enum/field.
msg: message definition DescriptorProto.
Return: RST formatted pseudo-JSON string representation of message definition.
"""
lines = []
for index, field in enumerate(msg.field):
field_type_context = type_context.ExtendField(index, field.name)
leading_comment = field_type_context.leading_comment
if HideNotImplemented(leading_comment):
continue
lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field)))
if lines:
return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n'
else:
return '.. code-block:: json\n\n {}\n\n'
def NormalizeFieldTypeName(field_fqn):
"""Normalize a fully qualified field type name, e.g.
.envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.
Return: Normalized type name.
"""
if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX):
return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):]
if field_fqn.startswith(ENVOY_PREFIX):
return field_fqn[len(ENVOY_PREFIX):]
return field_fqn
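# Sketch: NormalizeFieldTypeName('.envoy.api.v2.core.Address') -> 'core.Address';
# names outside the Envoy namespaces (e.g. WKTs) are returned unchanged.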
def NormalizeTypeContextName(type_name):
"""Normalize a type name, e.g.
envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
type_name: a name from a TypeContext.
Return: Normalized type name.
"""
return NormalizeFieldTypeName(QualifyTypeName(type_name))
def QualifyTypeName(type_name):
return '.' + type_name
def TypeNameFromFQN(fqn):
return fqn[1:]
def FormatEmph(s):
"""RST format a string for emphasis."""
return '*%s*' % s
def FormatFieldType(type_context, field):
"""Format a FieldDescriptorProto type description.
Adds cross-refs for message types.
TODO(htuch): Add cross-refs for enums as well.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return: RST formatted field type.
"""
if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(
ENVOY_PREFIX):
type_name = NormalizeFieldTypeName(field.type_name)
if field.type == field.TYPE_MESSAGE:
if type_context.map_typenames and TypeNameFromFQN(
field.type_name) in type_context.map_typenames:
return 'map<%s, %s>' % tuple(
map(functools.partial(FormatFieldType, type_context),
type_context.map_typenames[TypeNameFromFQN(field.type_name)]))
return FormatInternalLink(type_name, MessageCrossRefLabel(type_name))
if field.type == field.TYPE_ENUM:
return FormatInternalLink(type_name, EnumCrossRefLabel(type_name))
elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):
wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]
return FormatExternalLink(
wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' %
wkt.lower())
elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):
rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]
return FormatExternalLink(
rpc,
'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower())
elif field.type_name:
return field.type_name
pretty_type_names = {
field.TYPE_DOUBLE: 'double',
field.TYPE_FLOAT: 'float',
field.TYPE_INT32: 'int32',
field.TYPE_SFIXED32: 'int32',
field.TYPE_SINT32: 'int32',
field.TYPE_FIXED32: 'uint32',
field.TYPE_UINT32: 'uint32',
field.TYPE_INT64: 'int64',
field.TYPE_SFIXED64: 'int64',
field.TYPE_SINT64: 'int64',
field.TYPE_FIXED64: 'uint64',
field.TYPE_UINT64: 'uint64',
field.TYPE_BOOL: 'bool',
field.TYPE_STRING: 'string',
field.TYPE_BYTES: 'bytes',
}
if field.type in pretty_type_names:
return FormatExternalLink(pretty_type_names[field.type],
'https://developers.google.com/protocol-buffers/docs/proto#scalar')
raise ProtodocError('Unknown field type ' + str(field.type))
def StripLeadingSpace(s):
"""Remove leading space in flat comment strings."""
return MapLines(lambda s: s[1:], s)
def FileCrossRefLabel(msg_name):
"""File cross reference label."""
return 'envoy_api_file_%s' % msg_name
def MessageCrossRefLabel(msg_name):
"""Message cross reference label."""
return 'envoy_api_msg_%s' % msg_name
def EnumCrossRefLabel(enum_name):
"""Enum cross reference label."""
return 'envoy_api_enum_%s' % enum_name
def FieldCrossRefLabel(field_name):
"""Field cross reference label."""
return 'envoy_api_field_%s' % field_name
def EnumValueCrossRefLabel(enum_value_name):
"""Enum value cross reference label."""
return 'envoy_api_enum_value_%s' % enum_value_name
def FormatAnchor(label):
"""Format a label as an Envoy API RST anchor."""
return '.. _%s:\n\n' % label
def FormatSecurityOptions(security_option, field, type_context, edge_config):
sections = []
if security_option.configure_for_untrusted_downstream:
sections.append(
Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.'))
if security_option.configure_for_untrusted_upstream:
sections.append(
Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.'))
if edge_config.note:
sections.append(Indent(4, edge_config.note))
example_dict = json_format.MessageToDict(edge_config.example)
validate_fragment.ValidateFragment(field.type_name[1:], example_dict)
field_name = type_context.name.split('.')[-1]
example = {field_name: example_dict}
sections.append(
Indent(4, 'Example configuration for untrusted environments:\n\n') +
Indent(4, '.. code-block:: yaml\n\n') +
'\n'.join(IndentLines(6,
yaml.dump(example).split('\n'))))
return '.. attention::\n' + '\n\n'.join(sections)
def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest):
"""Format a FieldDescriptorProto as RST definition list item.
Args:
outer_type_context: contextual information for enclosing message.
type_context: contextual information for message/enum/field.
field: FieldDescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item.
"""
field_annotations = []
anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name)))
if field.options.HasExtension(validate_pb2.rules):
rule = field.options.Extensions[validate_pb2.rules]
if ((rule.HasField('message') and rule.message.required) or
(rule.HasField('duration') and rule.duration.required) or
(rule.HasField('string') and rule.string.min_len > 0) or
(rule.HasField('string') and rule.string.min_bytes > 0) or
(rule.HasField('repeated') and rule.repeated.min_items > 0)):
field_annotations = ['*REQUIRED*']
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
if HideNotImplemented(leading_comment):
return ''
if field.HasField('oneof_index'):
oneof_context = outer_type_context.ExtendOneof(field.oneof_index,
type_context.oneof_names[field.oneof_index])
oneof_comment = oneof_context.leading_comment
formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment)
if HideNotImplemented(oneof_comment):
return ''
# If the oneof only has one field and marked required, mark the field as required.
if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[
field.oneof_index]:
field_annotations = ['*REQUIRED*']
if len(type_context.oneof_fields[field.oneof_index]) > 1:
# Fields in oneof shouldn't be marked as required when we have oneof comment below it.
field_annotations = []
oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[
field.oneof_index] else '\nOnly one of %s may be set.\n'
formatted_oneof_comment += oneof_template % ', '.join(
FormatInternalLink(
f,
FieldCrossRefLabel(NormalizeTypeContextName(
outer_type_context.ExtendField(i, f).name)))
for i, f in type_context.oneof_fields[field.oneof_index])
else:
formatted_oneof_comment = ''
# If there is a udpa.annotations.security option, include it after the comment.
if field.options.HasExtension(security_pb2.security):
manifest_description = protodoc_manifest.fields.get(type_context.name)
if not manifest_description:
raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name)
formatted_security_options = FormatSecurityOptions(
field.options.Extensions[security_pb2.security], field, type_context,
manifest_description.edge_config)
else:
formatted_security_options = ''
pretty_label_names = {
field.LABEL_OPTIONAL: '',
field.LABEL_REPEATED: '**repeated** ',
}
comment = '(%s) ' % ', '.join(
[pretty_label_names[field.label] + FormatFieldType(type_context, field)] +
field_annotations) + formatted_leading_comment
return anchor + field.name + '\n' + MapLines(functools.partial(
Indent, 2), comment + formatted_oneof_comment) + formatted_security_options
def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest):
"""Format a DescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
msg: DescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item.
"""
type_context.oneof_fields = defaultdict(list)
type_context.oneof_required = defaultdict(bool)
type_context.oneof_names = defaultdict(list)
for index, field in enumerate(msg.field):
if field.HasField('oneof_index'):
leading_comment = type_context.ExtendField(index, field.name).leading_comment
if HideNotImplemented(leading_comment):
continue
type_context.oneof_fields[field.oneof_index].append((index, field.name))
for index, oneof_decl in enumerate(msg.oneof_decl):
if oneof_decl.options.HasExtension(validate_pb2.required):
type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required]
type_context.oneof_names[index] = oneof_decl.name
return '\n'.join(
FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name),
field, protodoc_manifest)
for index, field in enumerate(msg.field)) + '\n'
def FormatEnumValueAsDefinitionListItem(type_context, enum_value):
"""Format a EnumValueDescriptorProto as RST definition list item.
Args:
type_context: contextual information for message/enum/field.
enum_value: EnumValueDescriptorProto.
Returns:
RST formatted definition list item.
"""
anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name)))
default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else ''
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
if HideNotImplemented(leading_comment):
return ''
comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment
return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment)
def FormatEnumAsDefinitionList(type_context, enum):
"""Format a EnumDescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
enum: DescriptorProto.
Returns:
RST formatted definition list item.
"""
return '\n'.join(
FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name),
enum_value)
for index, enum_value in enumerate(enum.value)) + '\n'
def FormatProtoAsBlockComment(proto):
"""Format a proto as a RST block comment.
Useful in debugging, not usually referenced.
"""
return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n'
class RstFormatVisitor(visitor.Visitor):
"""Visitor to generate a RST representation from a FileDescriptor proto.
See visitor.Visitor for visitor method docs comments.
"""
def __init__(self):
r = runfiles.Create()
with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f:
# Load as YAML, emit as JSON and then parse as proto to provide type
# checking.
protodoc_manifest_untyped = yaml.safe_load(f.read())
self.protodoc_manifest = manifest_pb2.Manifest()
json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest)
def VisitEnum(self, enum_proto, type_context):
normal_enum_type = NormalizeTypeContextName(type_context.name)
anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type))
header = FormatHeader('-', 'Enum %s' % normal_enum_type)
github_url = GithubUrl(type_context)
proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n'
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum')
if HideNotImplemented(leading_comment):
return ''
return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList(
type_context, enum_proto)
def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
# Skip messages synthesized to represent map types.
if msg_proto.options.map_entry:
return ''
normal_msg_type = NormalizeTypeContextName(type_context.name)
anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type))
header = FormatHeader('-', normal_msg_type)
github_url = GithubUrl(type_context)
proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n'
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message')
if HideNotImplemented(leading_comment):
return ''
return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson(
type_context, msg_proto) + FormatMessageAsDefinitionList(
type_context, msg_proto,
self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums)
def VisitFile(self, file_proto, type_context, services, msgs, enums):
has_messages = True
if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums):
has_messages = False
# TODO(mattklein123): The logic in both the doc and transform tool around files without messages
# is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs
# in the common case.
if (has_messages and
not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations
and file_proto.name.startswith('envoy')):
raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format(
file_proto.name))
# Find the earliest detached comment, attribute it to file level.
# Also extract file level titles if any.
header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name)
# If there are no messages, we don't include in the doc tree (no support for
# service rendering yet). We allow these files to be missing from the
# toctrees.
if not has_messages:
header = ':orphan:\n\n' + header
warnings = ''
if file_proto.options.HasExtension(status_pb2.file_status):
if file_proto.options.Extensions[status_pb2.file_status].work_in_progress:
warnings += ('.. warning::\n This API is work-in-progress and is '
'subject to breaking changes.\n\n')
debug_proto = FormatProtoAsBlockComment(file_proto)
return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto
def Main():
plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)])
if __name__ == '__main__':
Main()
| 37.362517 | 122 | 0.727482 | [
"Apache-2.0"
] | Gsantomaggio/envoy | tools/protodoc/protodoc.py | 27,312 | Python |
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from . import _utilities
import typing
# Export this package's modules as members:
from .foo import *
from .provider import *
_utilities.register(
resource_modules="""
[
{
"pkg": "example",
"mod": "",
"fqn": "pulumi_example",
"classes": {
"example::Foo": "Foo"
}
}
]
""",
resource_packages="""
[
{
"pkg": "example",
"token": "pulumi:providers:example",
"fqn": "pulumi_example",
"class": "Provider"
}
]
"""
)
| 17.352941 | 80 | 0.615254 | [
"Apache-2.0"
] | 64bit/pulumi | pkg/codegen/testing/test/testdata/simple-methods-schema-single-value-returns/python/pulumi_example/__init__.py | 590 | Python |
from django import forms
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404, HttpResponseForbidden, HttpResponse
from django.shortcuts import render
from django.views.generic import TemplateView, View
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic.list import ListView
import json
from .models import Profile, Friendship, Message
from .forms import ProfileRegisterForm
from registration.models import RegistrationProfile
#messagescreate
#def form_valid(self, form):
# user = self.request.user
# form.instance.user = user
# valid_data = super(nomedaview, self).form_valid(form)
#  add anything you need here after the message has been created
# return valid_data
# Create your views here.
### CREATE ###
class ProfileRegister(CreateView):
model = Profile
form_class = ProfileRegisterForm
success_url = "/accounts/register/complete/"
def form_valid(self, form):
#saving the user first
#user = User.objects.create_user(self.request.POST.get('name'), self.request.POST.get('email'), self.request.POST.get('password'))
#user = User.objects.create_user(form.cleaned_data['name'], form.cleaned_data['email'], form.cleaned_data['password'])
#user.save()
user = RegistrationProfile.objects.create_inactive_user(username=form.cleaned_data['username'],
password=form.cleaned_data['password'],
email=form.cleaned_data['email'],
site=1)
#creating the profile
profile = form.save(commit=False)
#add user_id on profile
profile.user_id = user.id
profile.save()
#saving many to many relationship
form.save_m2m()
return HttpResponseRedirect(self.success_url)
def get_context_data(self, **kwargs):
context = super(ProfileRegister, self).get_context_data(**kwargs)
context["site_name"] = "What's the Craic?"
context["title"] = "- Add Profile"
context["submit_btn"] = "Create Account"
return context
### UPDATE ###
class ProfileUpdate(SuccessMessageMixin, UpdateView):
model = Profile
#form_class = ProfileRegisterForm#
fields = ['name', 'picture', 'nui_id', 'staff', 'native', 'learning', 'about']
widgets={
"about":forms.Textarea(attrs={'class':'form-control materialize-textarea'}),
}
success_url = "/dashboard/"
template_name = 'profiles/profile_edit.html'
success_message = "Profile was updated successfully"
def dispatch(self, request, *args, **kwargs):
user = self.request.user
obj = self.get_object()
if obj.user != user:
messages.error(self.request, 'This profile is not yours.')
return HttpResponseRedirect(reverse('dashboard'))
return super(ProfileUpdate, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ProfileUpdate, self).get_context_data(**kwargs)
context["site_name"] = "What's the Craic?"
context["title"] = "- Update Profile"
context["submit_btn"] = "Update Account"
return context
class ProfileDetailView(DetailView):
model = Profile
def get_context_data(self, **kwargs):
context = super(ProfileDetailView, self).get_context_data(**kwargs)
user = self.request.user
profile = Profile.objects.get(user=self.request.user)
new_messages_count = profile.get_new_messages_count()
view_profile = Profile.objects.get(id=self.kwargs['pk'])
are_friends = profile.are_friends(view_profile)
waiting = profile.waiting_friendship_approval(view_profile)
if waiting:
if profile == waiting:
context["waiting"] = "WAITING APPROVAL"
else:
context["waiting"] = "ACCEPT REQUEST"
context["profile"] = profile
context["are_friends"] = are_friends
context["new_messages_count"] = new_messages_count
context["site_name"] = "What's the Craic?"
context["title"] = "- Add User"
context["submit_btn"] = "Create Account"
return context
class FindFriends(SuccessMessageMixin, TemplateView):
template_name = "findfriends.html"
def get_context_data(self, **kwargs):
context = super(FindFriends, self).get_context_data(**kwargs)
#TESTS WE NEED
user = self.request.user
profile = Profile.objects.get(user=self.request.user)
possible_friends = Profile.find_friends(profile)
friendships_requests = profile.find_friendships_requests()
waiting_approvals = profile.get_waiting_approvals()
new_messages_count = profile.get_new_messages_count()
#ALL CONTEXT VARIABLES
context["profile"] = profile
context["possible_friends"] = possible_friends
context["friendships_requests"] = friendships_requests
context["waiting_approvals"] = waiting_approvals
context["new_messages_count"] = new_messages_count
context["site_name"] = "What's the Craic?"
context["title"] = ""
context["submit_btn"] = ""
return context
class AddFriend(SuccessMessageMixin, View):
template_name = "add_friend.html"
def dispatch(self, request, *args, **kwargs):
user = self.request.user
profile = Profile.objects.get(user=self.request.user)
view_profile = Profile.objects.get(id=self.kwargs['pk'])
friendship = Friendship(from_user=profile, to_user=view_profile, status=False)
friendship.save()
messages.info(self.request, 'Friendship requested.')
return HttpResponseRedirect(reverse('profiles:view', kwargs={'pk':self.kwargs['pk']}))
class AcceptFriendship(SuccessMessageMixin, View):
template_name = "add_friend.html"
def dispatch(self, request, *args, **kwargs):
user = self.request.user
profile = Profile.objects.get(user=self.request.user)
view_profile = Profile.objects.get(id=self.kwargs['pk'])
friendship = Friendship.objects.get(from_user=view_profile, to_user=profile)
friendship.status = True
friendship.save()
messages.info(self.request, 'Friendship accepted.')
return HttpResponseRedirect(reverse('profiles:view', kwargs={'pk':self.kwargs['pk']}))
class SendMessage(SuccessMessageMixin, TemplateView):
template_name = "sendmessage.html"
def get_context_data(self, **kwargs):
context = super(SendMessage, self).get_context_data(**kwargs)
#TESTS WE NEED
user = self.request.user
profile = Profile.objects.get(user=self.request.user)
message_for = Profile.objects.get(id=self.kwargs['pk'])
new_messages_count = profile.get_new_messages_count()
#ALL CONTEXT VARIABLES
context["profile"] = profile
context["message_for"] = message_for
context["new_messages_count"] = new_messages_count
context["site_name"] = "What's the Craic?"
context["title"] = ""
context["submit_btn"] = ""
return context
def post(self, request, *args, **kwargs):
#form = self.form_class(request.POST)
        form = self.request.POST
        profile = Profile.objects.get(user=self.request.user)
        message_for = Profile.objects.get(id=self.kwargs['pk'])
        if form.get('message'):
message = Message(from_user=profile, to_user=message_for, message=form['message'])
message.save()
messages.success(request, 'Message was sent')
return HttpResponseRedirect(reverse('profiles:sentmessages'))
class SentMessages(SuccessMessageMixin, ListView):
template_name = "sentmessages.html"
paginate_by = 10
context_object_name = "sent_messages"
def get_queryset(self):
profile = Profile.objects.get(user=self.request.user)
return Message.objects.filter(from_user = profile).order_by('-date')
def get_context_data(self, **kwargs):
context = super(SentMessages, self).get_context_data(**kwargs)
#TESTS WE NEED
user = self.request.user
profile = Profile.objects.get(user=self.request.user)
#sent_messages = Message.objects.filter(from_user = profile).order_by('-date')
new_messages_count = profile.get_new_messages_count()
#ALL CONTEXT VARIABLES
context["profile"] = profile
context["new_messages_count"] = new_messages_count
context["site_name"] = "What's the Craic?"
context["title"] = ""
context["submit_btn"] = ""
return context
class Inbox(SuccessMessageMixin, ListView):
template_name = "inbox.html"
paginate_by = 10
context_object_name = "inbox_messages"
def get_queryset(self):
profile = Profile.objects.get(user=self.request.user)
return Message.objects.filter(to_user = profile).order_by('-date')
def get_context_data(self, **kwargs):
context = super(Inbox, self).get_context_data(**kwargs)
#TESTS WE NEED
user = self.request.user
profile = Profile.objects.get(user=self.request.user)
new_messages_count = profile.get_new_messages_count()
#ALL CONTEXT VARIABLES
context["profile"] = profile
context["new_messages_count"] = new_messages_count
context["site_name"] = "What's the Craic?"
context["title"] = ""
context["submit_btn"] = ""
return context
class VizualizedMessage(SuccessMessageMixin, View):
template_name = "add_friend.html"
def dispatch(self, request, *args, **kwargs):
response_data = {}
message = Message.objects.get(id=request.POST.get('messageId'))
message.visualized = True
message.save()
response_data['result'] = 'Message visualized!'
response_data['visualized'] = message.visualized
return HttpResponse(
json.dumps(response_data),
content_type="application/json"
) | 33.621818 | 132 | 0.742808 | [
"MIT"
] | caiocsalvador/whats_the_craic | src/profiles/views.py | 9,246 | Python |
# pylint: disable=R,C,W
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author : Nasir Khan (r0ot h3x49)
Github : https://github.com/r0oth3x49
License : MIT
Copyright (c) 2018-2025 Nasir Khan (r0ot h3x49)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from udemy.compat import time, sys
from udemy.logger import logger
from udemy.extract import Udemy
from udemy.shared import (
UdemyCourse,
UdemyCourses,
UdemyChapters,
UdemyLectures,
UdemyLectureStream,
UdemyLectureAssets,
UdemyLectureSubtitles,
)
class InternUdemyCourses(UdemyCourses, Udemy):
def __init__(self, *args, **kwargs):
super(InternUdemyCourses, self).__init__(*args, **kwargs)
def _fetch_course(self):
auth = {}
if not self._cookies:
auth = self._login(username=self._username, password=self._password)
if not auth and self._cookies:
auth = self._login(cookies=self._cookies)
if auth.get("login") == "successful":
logger.info(msg="Logged in successfully.", new_line=True)
logger.info(msg="Fetching all enrolled course(s) url(s)..")
self._courses = self._extract_subscribed_courses()
time.sleep(1)
logger.success(msg="Fetching all enrolled course(s) url(s).. ")
self._logout()
if auth.get("login") == "failed":
logger.error(msg="Failed to login ..\n")
sys.exit(0)
class InternUdemyCourse(UdemyCourse, Udemy):
def __init__(self, *args, **kwargs):
self._info = ""
super(InternUdemyCourse, self).__init__(*args, **kwargs)
def _fetch_course(self):
if self._have_basic:
return
auth = {}
if not self._cookies:
auth = self._login(username=self._username, password=self._password)
if not auth and self._cookies:
auth = self._login(cookies=self._cookies)
if auth.get("login") == "successful":
logger.info(msg="Logged in successfully.", new_line=True)
logger.info(msg="Downloading course information ..")
self._info = self._real_extract(
self._url, skip_hls_stream=self._skip_hls_stream
)
time.sleep(1)
logger.success(msg="Downloaded course information .. ")
access_token = self._info["access_token"]
self._id = self._info["course_id"]
self._title = self._info["course_title"]
self._chapters_count = self._info["total_chapters"]
self._total_lectures = self._info["total_lectures"]
self._chapters = [
InternUdemyChapter(z, access_token=access_token)
for z in self._info["chapters"]
]
logger.info(
msg="Trying to logout now...",
new_line=True,
)
if not self._cookies:
self._logout()
logger.info(
msg="Logged out successfully.",
new_line=True,
)
self._have_basic = True
if auth.get("login") == "failed":
logger.error(msg="Failed to login ..\n")
sys.exit(0)
class InternUdemyChapter(UdemyChapters):
def __init__(self, chapter, access_token=None):
super(InternUdemyChapter, self).__init__()
self._chapter_id = chapter["chapter_id"]
self._chapter_title = chapter["chapter_title"]
self._chapter_index = chapter["chapter_index"]
self._lectures_count = chapter.get("lectures_count", 0)
self._lectures = (
[
InternUdemyLecture(z, access_token=access_token)
for z in chapter["lectures"]
]
if self._lectures_count > 0
else []
)
class InternUdemyLecture(UdemyLectures):
def __init__(self, lectures, access_token=None):
super(InternUdemyLecture, self).__init__()
self._access_token = access_token
self._info = lectures
self._lecture_id = self._info["lectures_id"]
self._lecture_title = self._info["lecture_title"]
self._lecture_index = self._info["lecture_index"]
self._subtitles_count = self._info.get("subtitle_count", 0)
self._sources_count = self._info.get("sources_count", 0)
self._assets_count = self._info.get("assets_count", 0)
self._extension = self._info.get("extension")
self._html_content = self._info.get("html_content")
self._duration = self._info.get("duration")
if self._duration:
duration = int(self._duration)
(mins, secs) = divmod(duration, 60)
(hours, mins) = divmod(mins, 60)
if hours == 0:
self._duration = "%02d:%02d" % (mins, secs)
else:
self._duration = "%02d:%02d:%02d" % (hours, mins, secs)
def _process_streams(self):
streams = (
[InternUdemyLectureStream(z, self) for z in self._info["sources"]]
if self._sources_count > 0
else []
)
self._streams = sorted(streams, key=lambda k: k.quality)
self._streams = sorted(self._streams, key=lambda k: k.mediatype)
def _process_assets(self):
assets = (
[InternUdemyLectureAssets(z, self) for z in self._info["assets"]]
if self._assets_count > 0
else []
)
self._assets = assets
def _process_subtitles(self):
subtitles = (
[InternUdemyLectureSubtitles(z, self) for z in self._info["subtitles"]]
if self._subtitles_count > 0
else []
)
self._subtitles = subtitles
class InternUdemyLectureStream(UdemyLectureStream):
def __init__(self, sources, parent):
super(InternUdemyLectureStream, self).__init__(parent)
self._mediatype = sources.get("type")
self._extension = sources.get("extension")
self._token = parent._access_token
height = sources.get("height", "0")
width = sources.get("width", "0")
self._resolution = "%sx%s" % (width, height)
self._dimension = width, height
self._quality = int(height)
self._is_hls = "hls" in self._mediatype
self._url = sources.get("download_url")
class InternUdemyLectureAssets(UdemyLectureAssets):
def __init__(self, assets, parent):
super(InternUdemyLectureAssets, self).__init__(parent)
self._mediatype = assets.get("type")
self._extension = assets.get("extension")
title = assets.get("title", "")
if not title:
title = assets.get("filename")
if title and title.endswith(self._extension):
ok = "{0:03d} ".format(parent._lecture_index) + title
self._filename = ok
else:
ok = "{0:03d} ".format(parent._lecture_index) + assets.get("filename")
self._filename = ok
self._url = assets.get("download_url")
class InternUdemyLectureSubtitles(UdemyLectureSubtitles):
def __init__(self, subtitles, parent):
super(InternUdemyLectureSubtitles, self).__init__(parent)
self._mediatype = subtitles.get("type")
self._extension = subtitles.get("extension")
self._language = subtitles.get("language")
self._url = subtitles.get("download_url")
| 38.255708 | 168 | 0.626641 | [
"MIT"
] | 0x2f0713/udemy-dl | udemy/internal.py | 8,378 | Python |
import argparse
import datetime
import time
import torch
from torch import distributed as dist
from torch.nn import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from distillation.tool import DistillationBox
from models import load_ckpt, get_model, save_ckpt, set_bottleneck_transformer
from myutils.common import file_util, yaml_util
from myutils.pytorch import func_util, module_util
from utils import data_util, main_util, misc_util
from models.mimic.base import set_width
from models.slimmable.compute_post_bn import ComputeBN
from torch.nn.modules.batchnorm import _BatchNorm
def freeze_batch_norm_outside_bottleneck(model):
for m in model.modules():
if isinstance(m, _BatchNorm):
m.eval()
model.backbone_net.bottleneck.train()
def get_argparser():
argparser = argparse.ArgumentParser(description='Mimic Runner')
argparser.add_argument('--config', required=True, help='yaml file path')
argparser.add_argument('--device', default='cuda', help='device')
argparser.add_argument('--json', help='dictionary to overwrite config')
argparser.add_argument('-distill', action='store_true', help='distill a teacher model')
argparser.add_argument('-skip_teacher_eval', action='store_true', help='skip teacher model evaluation in testing')
argparser.add_argument('-transform_bottleneck', action='store_true',
help='use bottleneck transformer (if defined in yaml) in testing')
argparser.add_argument('-post_bn', action='store_true', help='use post traing batch norm calculation')
# distributed training parameters
argparser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
argparser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return argparser
def freeze_modules(student_model, student_model_config):
if 'frozen_modules' in student_model_config:
for student_path in student_model_config['frozen_modules']:
student_module = module_util.get_module(student_model, student_path)
module_util.freeze_module_params(student_module)
elif 'unfrozen_modules' in student_model_config:
module_util.freeze_module_params(student_model)
for student_path in student_model_config['unfrozen_modules']:
student_module = module_util.get_module(student_model, student_path)
module_util.unfreeze_module_params(student_module)
def distill_model(distillation_box, data_loader, optimizer, log_freq, device, epoch):
metric_logger = misc_util.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', misc_util.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
lr_scheduler = None
if epoch == 0:
warmup_factor = 1.0 / 1000.0
warmup_iters = min(1000, len(data_loader) - 1)
lr_scheduler = main_util.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
for images, targets in metric_logger.log_every(data_loader, log_freq, header):
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
optimizer.zero_grad()
loss = distillation_box(images, targets)
loss.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step()
metric_logger.update(loss=loss)
metric_logger.update(lr=optimizer.param_groups[0]['lr'])
# torch.cuda.empty_cache()
def distill(teacher_model, student_model, train_sampler, train_data_loader, val_data_loader,
device, distributed, distill_backbone_only, config, args):
train_config = config['train']
student_config = config['student_model']
distillation_box = DistillationBox(teacher_model, student_model,
train_config['criterion'], student_config)
ckpt_file_path = config['student_model']['ckpt']
optim_config = train_config['optimizer']
optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
scheduler_config = train_config['scheduler']
lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    # Default when no checkpoint exists yet, so save_ckpt always has a value to record.
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)
    save_ckpt(student_model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)
def main(args):
config = yaml_util.load_yaml_file(args.config)
if args.json is not None:
main_util.overwrite_config(config, args.json)
distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
teacher_model = get_model(config['teacher_model'], device)
module_util.freeze_module_params(teacher_model)
student_model_config = config['student_model']
student_model = get_model(student_model_config, device)
freeze_modules(student_model, student_model_config)
ckpt_file_path = config['student_model']['ckpt']
train_config = config['train']
optim_config = train_config['optimizer']
optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
scheduler_config = train_config['scheduler']
lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    # Default when no checkpoint exists yet, so save_ckpt always has a value to record.
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)
    save_ckpt(student_model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)
if __name__ == '__main__':
parser = get_argparser()
main(parser.parse_args())
| 47.814516 | 118 | 0.740597 | [
"MIT"
] | jsiloto/adaptive-cob | src/mimic_fix.py | 5,929 | Python |
#!/usr/bin/env python3
from fairseq import models
from fairseq.data import FairseqDataset, data_utils
from fairseq.models import FairseqMultiModel
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
from pytorch_translate.data import iterators as ptt_iterators
class PyTorchTranslateMultiTask(MultilingualTranslationTask):
def build_model(self, args):
model = models.build_model(args, self)
if not isinstance(model, FairseqMultiModel):
raise ValueError(
"PyTorchTranslateMultiTask requires a FairseqMultiModel architecture"
)
return model
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
):
assert isinstance(dataset, FairseqDataset)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
indices = data_utils.filter_by_size(
indices,
dataset.size,
max_positions,
raise_exception=(not ignore_invalid_inputs),
)
# create mini-batches with given size constraints
batch_sampler = data_utils.batch_by_size(
indices,
dataset.num_tokens,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
# return a reusable, sharded iterator
return ptt_iterators.WeightedEpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
weights=self.loss_weights,
)
def max_positions(self):
"""Return None to allow model to dictate max sentence length allowed"""
return None
| 31.457143 | 85 | 0.64941 | [
"BSD-3-Clause"
] | Meteorix/translate | pytorch_translate/tasks/pytorch_translate_multi_task.py | 2,202 | Python |
#!/usr/bin/python
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
"""
Swift upload image file.
This file contains the implementation for getting the
swift download URL for the uploaded image file.
"""
from builtins import object
from builtins import str
from future import standard_library
standard_library.install_aliases() # noqa
import logging
import re
from threading import RLock
import time
from urllib.parse import urlparse
from ansible.module_utils.fabric_utils import FabricAnsibleModule
import requests
import swiftclient
import swiftclient.utils
DOCUMENTATION = '''
---
module: Swift file util
author: Juniper Networks
short_description: Private module to get swift download url of the image file
description:
- Pass the required swift config info get the download url of image file.
requirements:
-
options:
authtoken:
description:
- authentication token string
required: true
authurl:
description:
- authentication url string
required: true
user:
description:
- Swift username
type: string
required: true
key:
description:
- Swift password
type: string
required: true
tenant_name:
description:
- Tenant name.
type: string
required: false
default: 'admin'
auth_version:
description:
- Keystone Auth version.
required: false
default: '3.0'
temp_url_key:
description:
- Temp url key
required: true
temp_url_key_2:
description:
- Temp url key 2
required: true
connection_retry_count:
description:
- Connection retry count
type: int
required: false
default: 5
chosen_temp_url_key:
description:
- Chosen Temp url key
required: false
default: 'temp_url_key'
container_name:
description:
- Name of the container
required: true
filename:
description:
- Name of the image file
required: true
'''
EXAMPLES = '''
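# Illustrative task only: the module name comes from this file's name
# (swift_fileutil) and every value below is a placeholder, not a value
# documented by the original author.
- name: Get the swift download URL for an uploaded image file
  swift_fileutil:
    authtoken: "{{ auth_token }}"
    authurl: "{{ keystone_auth_url }}"
    user: "{{ swift_user }}"
    key: "{{ swift_password }}"
    tenant_name: "admin"
    auth_version: "3.0"
    temp_url_key: "{{ temp_url_key }}"
    temp_url_key_2: "{{ temp_url_key_2 }}"
    chosen_temp_url_key: "temp_url_key"
    container_name: "{{ container_name }}"
    filename: "{{ image_filename }}"
    connection_retry_count: 5
  register: image_url_result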
'''
RETURN = '''
url:
description:
- An image file url used to download the file without authentication.
returned: on success always
type: str
error_msg:
description:
- Its an error message that is returned if there is any exception or error.
returned: on failure
type: str
'''
connection_lock = RLock()
class FileSvcUtil(object): # pragma: no cover
def __init__(self, authtoken, authurl, user, key, tenant_name,
auth_version, container_name, temp_url_key,
temp_url_key2, connection_retry_count, chosen_temp_url_key):
"""Init routine."""
self.requests = requests
self.authurl = authurl
self.preauthtoken = authtoken
self.user = user
self.key = key
self.auth_version = auth_version
self.container_name = container_name
self.temp_url_key = temp_url_key
self.temp_url_key_2 = temp_url_key2
self.connection_retry_count = connection_retry_count
self.chosen_temp_url_key = chosen_temp_url_key
self.conn_timeout_sec = 10
self.tenant_name = tenant_name
self.generateToken()
self.updateAccount()
def generateToken(self):
retry_count = 0
incr_sleep = 10
while retry_count <= self.connection_retry_count:
            acquired = False
            try:
                acquired = connection_lock.acquire()
swiftconn = swiftclient.client.Connection(
authurl=self.authurl,
user=self.user,
key=self.key,
preauthtoken=self.preauthtoken,
tenant_name=self.tenant_name,
auth_version=self.auth_version,
timeout=self.conn_timeout_sec,
insecure=True)
self.swift_conn = swiftconn
swiftconn.get_account()
self.storageurl = swiftconn.url
break
except Exception as e:
retry_count += 1
                err_msg = str(e)
logging.error(err_msg)
if retry_count == self.connection_retry_count:
raise Exception(
"Connection failed with swift server: " +
str(err_msg))
logging.error(
"Connection failed with swift server, retrying..")
incr_sleep *= 2
time.sleep(incr_sleep)
finally:
if acquired:
connection_lock.release()
def updateAccount(self):
headers = {'Temp-URL-Key': self.temp_url_key}
if self.temp_url_key_2 is not None:
headers['Temp-URL-Key-2'] = self.temp_url_key_2
try:
self.swift_conn.post_account(headers)
except Exception as err:
logging.error(str(err))
raise Exception(
"Update account failed with swift file server: " +
str(err))
def getobjectFileUri(self, filename):
return self.getFileObjUri(self.container_name, filename)
def getFileObjUri(self, container_name, fileobj_name):
return urlparse('/%s/%s' % (container_name, fileobj_name)).path
def getObjUrl(self, filename):
image_path = self.getobjectFileUri(filename)
try:
image_url = self.getPublicDownloadUrl(image_path)
return image_url
except Exception as e:
logging.error(str(e))
raise Exception(
"Get object url failed with swift file server: " + str(e))
def getPublicDownloadUrl(self, image_path):
return '%s/%s' % (
re.sub(r'([^/])/*$', r'\1', self.storageurl),
re.sub(r'^/*([^/])', r'\1', image_path))
def close(self):
if self.swift_conn:
self.swift_conn.close()
def main():
module = FabricAnsibleModule(
argument_spec=dict(
authtoken=dict(required=True),
authurl=dict(required=True),
user=dict(required=True),
key=dict(required=True),
tenant_name=dict(required=False, default="admin"),
auth_version=dict(required=False, default='3.0'),
temp_url_key=dict(required=True),
temp_url_key_2=dict(required=True),
chosen_temp_url_key=dict(required=False,
default="temp_url_key"),
container_name=dict(required=True),
filename=dict(required=True),
connection_retry_count=dict(required=False,
default=5, type='int')),
supports_check_mode=False)
m_args = module.params
authtoken = m_args['authtoken']
authurl = m_args['authurl']
user = m_args['user']
key = m_args['key']
tenant_name = m_args['tenant_name']
auth_version = m_args['auth_version']
temp_url_key = m_args['temp_url_key']
temp_url_key_2 = m_args['temp_url_key_2']
chosen_temp_url_key = m_args['chosen_temp_url_key']
container_name = m_args['container_name']
filename = m_args['filename']
connection_retry_count = m_args['connection_retry_count']
url = None
error_msg = ''
try:
fileutil = FileSvcUtil(
authtoken,
authurl,
user,
key,
tenant_name,
auth_version,
container_name,
temp_url_key,
temp_url_key_2,
connection_retry_count,
chosen_temp_url_key)
url = fileutil.getObjUrl(filename)
fileutil.close()
except Exception as e:
error_msg = "Exception occurred in swift_fileutil: " + str(e)
results = {}
results['url'] = url
results['error_msg'] = error_msg
module.exit_json(**results)
if __name__ == '__main__':
main()
| 29.158845 | 79 | 0.59168 | [
"Apache-2.0"
] | edwinpjacques/contrail-controller | src/config/fabric-ansible/ansible-playbooks/library/swift_fileutil.py | 8,077 | Python |
import sys
import logging
logger = logging.getLogger(__name__)
import base64
import json
import datetime
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric import padding
from mqfactory.message.security import Signature
import socket
class Decoded(object):
def __init__(self, keys):
self.keys = keys
def __getitem__(self, name):
key = self.keys[name]
if not isinstance(key["public"], rsa.RSAPublicKey):
return {
"private": decode(key["private"]),
"public" : decode(key["public"])
}
return key
class RsaSignature(Signature):
def __init__(self, keys, me=socket.gethostname()):
self.keys = Decoded(keys)
self.me = me
self.key = self.keys[self.me]["private"]
def _sign(self, message, ts=None):
logger.debug("signing {0}".format(message.id))
message.tags["signature"] = {
"origin" : self.me,
"ts" : ts or str(datetime.datetime.utcnow())
}
payload = serialize(message)
message.tags["signature"]["hash"] = base64.b64encode(sign(payload, self.key))
def _validate(self, message):
key = self.keys[message.tags["signature"]["origin"]]["public"]
signature = base64.b64decode(message.tags["signature"].pop("hash"))
payload = serialize(message)
validate(payload, signature, key)
message.tags.pop("signature")
# utility functions wrapping cryptography functions
def generate_key_pair():
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
return key, key.public_key()
def encode(key):
if isinstance(key, rsa.RSAPublicKey):
return key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
else:
return key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
def decode(pem):
try:
pem = pem.encode("ascii","ignore") # unicode -> str
except AttributeError:
pass
if b"PUBLIC KEY" in pem:
return serialization.load_pem_public_key(
pem,
backend=default_backend()
)
else:
return serialization.load_pem_private_key(
pem,
password=None,
backend=default_backend()
)
def serialize(message):
serialized = json.dumps({
"tags" : message.tags,
"payload" : message.payload
}, sort_keys=True).encode("utf-8")
return base64.b64encode(serialized)
def sign(payload, key):
return key.sign(
payload,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
def validate(message, signature, key):
key.verify(
signature,
message,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
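# A minimal usage sketch (not part of the original module): it exercises the helpers
# above end-to-end -- generate a key pair, round-trip the public key through PEM,
# sign a payload and validate the signature. The payload value is an arbitrary example.
if __name__ == "__main__":
  private_key, public_key = generate_key_pair()
  pem = encode(public_key)                 # PEM-encode the public key
  restored = decode(pem)                   # ...and load it back
  payload = base64.b64encode(b"example payload")
  signature = sign(payload, private_key)
  validate(payload, signature, restored)   # raises InvalidSignature if tampered with
  print("signature verified")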
| 25.282258 | 81 | 0.690909 | [
"MIT"
] | christophevg/py-mqfactory | mqfactory/message/security/rsa.py | 3,135 | Python |
from juno.resources import handler_request
from juno.resources.routes import subscription_routes
def create(dictionary):
return handler_request.post(subscription_routes.get_base_url(), dictionary)
def find_all():
return handler_request.get(subscription_routes.get_base_url())
def find_by_id(subscription_id):
return handler_request.get(
subscription_routes.get_specific_subscription_by_id_url(subscription_id)
)
def deactivation(subscription_id):
return handler_request.post(
subscription_routes.get_deactivation_subscription_url(subscription_id)
)
def activation(subscription_id):
return handler_request.post(
subscription_routes.get_activation_subscription_url(subscription_id)
)
def cancelation(subscription_id):
return handler_request.post(
subscription_routes.get_cancelation_subscription_url(subscription_id)
)
def completion(subscription_id):
return handler_request.post(
subscription_routes.get_completion_subscription_url(subscription_id)
)
| 25.658537 | 80 | 0.794677 | [
"MIT"
] | leogregianin/juno-python | juno/subscription.py | 1,052 | Python |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.expm1_run import expm1_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_expm1_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# testflag,opfuncname,testRunArgs, setdimArgs
("expm1_001", expm1_run, ([4, 3], 'float16')),
("expm1_002", expm1_run, ([4, 16], 'float32')),
("expm1_003", expm1_run, ([4, ], 'float16')),
("expm1_004", expm1_run, ([4, 3, 16], 'float16')),
("expm1_005", expm1_run, ([32, 1024], 'float32')),
]
self.testarg_rpc_cloud = [
# testflag,opfuncname,testRunArgs, setdimArgs
("expm1_006", expm1_run, ([4, 3], 'float16')),
("expm1_007", expm1_run, ([4, 3], 'float32')),
("expm1_008", expm1_run, ([4, ], 'float16')),
("expm1_009", expm1_run, ([4, 3, 16], 'float16')),
("expm1_010", expm1_run, ([32, 1024], 'float32')),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
"""
run case.#
:return:
"""
self.common_run(self.testarg)
def test_run_rpc_cloud(self):
"""
run case.#
:return:
"""
self.common_run(self.testarg_rpc_cloud)
def teardown(self):
"""
clean environment
:return:
"""
self._log.info("============= {0} Teardown============".format(self.casename))
return
| 32.511628 | 89 | 0.546853 | [
"Apache-2.0"
] | mindspore-ai/akg | tests/st/ops/ascend/vector/test_expm1_001.py | 2,796 | Python |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from gfx.models import Material, Mesh, Shader, Model
import subprocess
import os
import base64
import platform
def get_mesh( request, mesh_id):
mesh = get_object_or_404(Mesh, pk=mesh_id)
"""
TODO: Make it grab it from the cache instead of generating it
every single time like some savage that can't handle living
in the twenty-first century
"""
if( False ):#exportedMesh.findwith( mesh_id )):
pass
#
# Can't find it, then grab the mesh, and export it
#
processName = None
sys = platform.system()
if( sys == "Darwin" ):
processName = '/Applications/blender.app/Contents/MacOS/blender'
else:
return HttpResponse("Unknown operating system `{}`".format(sys))
if( subprocess.call([processName,"--background", mesh.mesh.name, "--python","./gfx/export.py"]) == 1 ):
return HttpResponse("There was an error")
filename, fileExtension = os.path.splitext(mesh.mesh.name)
newFileContents = open("{0}.js".format(filename)).read()
return HttpResponse(newFileContents)
def get_texture( request, texture_id):
return HttpResponse("You're looking at texture %s." % texture_id )
def get_material( request, material_id):
material = get_object_or_404(Material, pk=material_id)
vertSource = "{}".format( material.getVertex() )
fragSource = "{}".format( material.getFragment() )
return HttpResponse(
"""{{
"id":{0},
"vertex":"{1}",
"fragment":"{2}"
}}
""".format(
material.id,
base64.b64encode( vertSource ),
base64.b64encode( fragSource ),
)
)
def get_shader( request, shader_id):
shader = get_object_or_404(Shader, pk=shader_id)
return HttpResponse(
"""{{
"id":{0},
"tag":"{1}",
"content":"{2}"
}}""".format(
shader.id,
shader.tag,
shader.content
)
)
def search_models( request ):
model = get_object_or_404(Model, name=request.GET.get('tag', None))
return HttpResponse(
"""{{
"id":{0},
"tag":"{1}",
"mesh_id":{2},
"material_id":{3}
}}""".format(
model.id,
model.name,
model.mesh.id,
model.material.id
)
)
| 20.650485 | 104 | 0.672779 | [
"MIT"
] | Etskh/TheDruid | gfx/views.py | 2,127 | Python |
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import argparse
def plot():
results_dir = './'
results_files = [result for result in os.listdir(results_dir) if 'MAESTROeX' in result]
n_gpus_per_node = 6
throughput_list = []
nnodes_list = []
for results_file in results_files:
nsteps = 0
nzones = 0
time = 0.0
for line in open(results_dir + results_file):
if len(line.split()) == 0:
continue
# Determine the number of MPI ranks and thus the number of nodes.
if len(line.split()) == 6 and line.split()[0] == 'MPI' and line.split()[1] == 'initialized':
n_ranks = int(line.split()[3])
n_nodes = max(1, n_ranks / n_gpus_per_node)
# For each step, add up the number of zones advanced and the walltime
# for that step.
if len(line.split()) == 4 and line.split()[0] == 'Level' and line.split()[1] == '0,' and line.split()[3] == 'cells':
nsteps += 1
nzones += int(line.split()[2])
if len(line.split()) == 6 and line.split()[0] == 'Time' and line.split()[2] == 'advance':
time += float(line.split()[5])
nnodes_list.append(n_nodes)
throughput_list.append(nzones / time / 1.e6)
# Now we have all the results, so plot them.
nnodes_arr = np.array(nnodes_list)
throughput_arr = np.array(throughput_list)
throughput_arr = np.array([x for _, x in sorted(zip(nnodes_arr, throughput_arr))])
nnodes_arr = sorted(nnodes_arr)
throughput_arr = throughput_arr / throughput_arr[0] / nnodes_arr
plt.plot(nnodes_arr, throughput_arr, linestyle='-', lw=4, marker='o', markersize=14)
plt.xlim([0.9 * min(nnodes_arr), 1.1 * max(nnodes_arr)])
plt.ylim([0, 1.25 * max(throughput_arr)])
plt.ylabel('Throughput (normalized)', fontsize=20)
plt.xlabel('Number of nodes', fontsize=20)
plt.title('Weak scaling of MAESTROeX reacting bubble', fontsize=20)
plt.xscale('log', basex=2)
ax = plt.gca()
ax.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.xticks([1, 2, 4, 8, 16, 32, 64, 128])
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig('scaling.eps')
plt.savefig('scaling.png')
def main():
plot()
if __name__ == "__main__":
main()
| 28.137931 | 128 | 0.606618 | [
"BSD-3-Clause"
] | AMReX-Astro/MAESTRO- | Exec/test_problems/reacting_bubble/scaling/sc20/plot.py | 2,448 | Python |
from .datasets import SumStatDataset
| 18.5 | 36 | 0.864865 | [
"MIT"
] | theLongLab/phx-nn | src/dataset/__init__.py | 37 | Python |
import numpy as np
from yt.loaders import load_uniform_grid
from yt.testing import assert_allclose
def test_magnetic_code_units():
sqrt4pi = np.sqrt(4.0 * np.pi)
ddims = (16,) * 3
data = {"density": (np.random.uniform(size=ddims), "g/cm**3")}
ds1 = load_uniform_grid(
data, ddims, magnetic_unit=(sqrt4pi, "gauss"), unit_system="cgs"
)
assert_allclose(ds1.magnetic_unit.value, sqrt4pi)
assert str(ds1.magnetic_unit.units) == "G"
mucu = ds1.magnetic_unit.to("code_magnetic")
assert_allclose(mucu.value, 1.0)
assert str(mucu.units) == "code_magnetic"
ds2 = load_uniform_grid(data, ddims, magnetic_unit=(1.0, "T"), unit_system="cgs")
assert_allclose(ds2.magnetic_unit.value, 10000.0)
assert str(ds2.magnetic_unit.units) == "G"
mucu = ds2.magnetic_unit.to("code_magnetic")
assert_allclose(mucu.value, 1.0)
assert str(mucu.units) == "code_magnetic"
ds3 = load_uniform_grid(data, ddims, magnetic_unit=(1.0, "T"), unit_system="mks")
assert_allclose(ds3.magnetic_unit.value, 1.0)
assert str(ds3.magnetic_unit.units) == "T"
mucu = ds3.magnetic_unit.to("code_magnetic")
assert_allclose(mucu.value, 1.0)
assert str(mucu.units) == "code_magnetic"
ds4 = load_uniform_grid(
data, ddims, magnetic_unit=(1.0, "gauss"), unit_system="mks"
)
assert_allclose(ds4.magnetic_unit.value, 1.0e-4)
assert str(ds4.magnetic_unit.units) == "T"
mucu = ds4.magnetic_unit.to("code_magnetic")
assert_allclose(mucu.value, 1.0)
assert str(mucu.units) == "code_magnetic"
| 30.423077 | 85 | 0.683944 | [
"BSD-3-Clause-Clear"
] | Carreau/yt | yt/units/tests/test_magnetic_code_units.py | 1,582 | Python |
import math
# 1
area_of_circle = lambda r: math.pi * r ** 2
print(area_of_circle(10))
# 2
calculation = lambda x, y: ((x + y), (x - y))
print(calculation(4, 2))
# 3
def product(n):
if n == 1:
return 1
else:
return n * product(n - 1)
print(product(5))
# 4
time = lambda milli: (
round(milli / (1000 * 60 * 60 * 24)), round(milli / (1000 * 60 * 60)), round(milli / (1000 * 60)),
round(milli / 1000))
print(time(10000000))
# 5
showSalary = lambda name, salary=5000: (name, salary)
print(showSalary("A", 1000))
print(showSalary("B", 2000))
print(showSalary("C"))
# 6
diff = lambda x, y: x - y
print(diff(10, 12))
# 7
printer = lambda x, y, z: (x, str(x), y, str(y), z, str(z))
print(printer(True, 22.25, 'yes'))
# 8
data = [
('Alpha Centauri A', 4.3, 0.26, 1.56),
('Alpha Centauri B', 4.3, 0.077, 0.45),
('Alpha Centauri C', 4.2, 0.00001, 0.00006),
("Barnard's Star", 6.0, 0.00004, 0.0005),
('Wolf 359', 7.7, 0.000001, 0.00002),
('BD +36 degrees 2147', 8.2, 0.0003, 0.006),
('Luyten 726-8 A', 8.4, 0.000003, 0.00006),
('Luyten 726-8 B', 8.4, 0.000002, 0.00004),
('Sirius A', 8.6, 1.00, 23.6),
('Sirius B', 8.6, 0.001, 0.003),
('Ross 154', 9.4, 0.00002, 0.0005),
]
# data.sort()
print(data)
print(sorted(data)) | 22.275862 | 102 | 0.562693 | [
"MIT"
] | pradeep-charism/nus-mtech-workshops | nus-bead/examples/Workshop01.py | 1,292 | Python |
import threading
import time
class PyMbsThread(threading.Thread):
def __init__(self, function, realTime=False):
threading.Thread.__init__(self)
self.execute = False
self.function = function
self.realTime = realTime
self.scaling = 1
# Start-Time (Model)
self.model_offset = 0.0
# Start-Time (Real)
self.real_offset = 0.0
def reinit(self):
if (self.execute):
self.stop()
self.__init__(self.function, self.realTime)
def run(self):
# Initialise Offsets
self.real_offset = time.time()
self.model_offset = self.function()
t = self.model_offset
# Debug
# print "Starting Thread " + str(id(self))
# Endless Loop
self.execute = True
while self.execute:
# synchronise with real time
if (self.realTime):
# Real Elapsed Time
real = self.scaling*(time.time() - self.real_offset)
# Model Elapsed Time
model = t - self.model_offset
# Difference
deltaT = model-real
if (deltaT > 0):
time.sleep(deltaT)
# Execute next step
t = self.function()
# Debug
# print "Finished Thread " + str(id(self))
def stop(self):
self.execute = False
# Debug
# print "Stopped Thread " + str(id(self))
'''
Usage:
======
def myFunc():
  print('doing something')
  time.sleep(1)
t = PyMbsThread(myFunc)
t.start() # starts Thread
t.stop() # stop Thread
t.reinit() # "reset" thread
t.start() # start Thread again
t.stop()
''' | 21.78481 | 68 | 0.537478 | [
"MIT"
] | brutzl/pymbs | pymbs/ui/thread.py | 1,721 | Python |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cloudblue.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.590909 | 73 | 0.683625 | [
"MIT"
] | dussiks/cloudblue | manage.py | 629 | Python |
"""Config flow for Coronavirus integration."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.data_entry_flow import FlowResult
from . import get_coordinator
from .const import DOMAIN, OPTION_WORLDWIDE
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Coronavirus."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
_options = None
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the initial step."""
errors: dict[str, str] = {}
if self._options is None:
coordinator = await get_coordinator(self.hass)
if not coordinator.last_update_success or coordinator.data is None:
return self.async_abort(reason="cannot_connect")
self._options = {OPTION_WORLDWIDE: "Worldwide"}
for case in sorted(
coordinator.data.values(), key=lambda case: case.country
):
self._options[case.country] = case.country
if user_input is not None:
await self.async_set_unique_id(user_input["country"])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=self._options[user_input["country"]], data=user_input
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({vol.Required("country"): vol.In(self._options)}),
errors=errors,
)
| 31.673077 | 85 | 0.652702 | [
"Apache-2.0"
] | bimmbo/core | homeassistant/components/coronavirus/config_flow.py | 1,647 | Python |
from django_cradmin import crinstance, crapp
from django_cradmin.crinstance import reverse_cradmin_url
from devilry.apps.core.models import Period
from devilry.devilry_account.models import PeriodPermissionGroup
from devilry.devilry_admin.cradminextensions import devilry_crmenu_admin
from devilry.devilry_cradmin import devilry_crmenu
from devilry.devilry_cradmin import devilry_crinstance
from devilry.devilry_admin.views.period import admins
from devilry.devilry_admin.views.period import createassignment
from devilry.devilry_admin.views.period import examiners
from devilry.devilry_admin.views.period import overview
from devilry.devilry_admin.views.period import students
from devilry.devilry_admin.views.period import edit
from devilry.devilry_admin.views.period import overview_all_results
from devilry.devilry_qualifiesforexam import cradmin_app as qualifiesforexam
from devilry.devilry_admin.views.period.manage_tags import manage_tags
class Menu(devilry_crmenu_admin.Menu):
def build_menu(self):
super(Menu, self).build_menu()
period = self.request.cradmin_role
self.add_role_menuitem_object()
self.add_subject_breadcrumb_item(subject=period.subject)
self.add_period_breadcrumb_item(period=period, active=True)
def add_subject_breadcrumb_item(self, subject, active=False):
if self.cradmin_instance.get_devilryrole_for_requestuser() == 'periodadmin':
return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem(
label=subject.short_name,
url=reverse_cradmin_url(
instanceid='devilry_admin_subject_for_periodadmin',
appname='overview',
roleid=subject.id,
viewname=crapp.INDEXVIEW_NAME
),
active=active
))
else:
return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem(
label=subject.short_name,
url=reverse_cradmin_url(
instanceid='devilry_admin_subjectadmin',
appname='overview',
roleid=subject.id,
viewname=crapp.INDEXVIEW_NAME
),
active=active
))
class CrAdminInstance(devilry_crinstance.BaseCrInstanceAdmin):
menuclass = Menu
roleclass = Period
apps = [
('overview', overview.App),
('students', students.App),
('examiners', examiners.App),
('admins', admins.App),
('createassignment', createassignment.App),
('edit', edit.App),
('overview_all_results', overview_all_results.App),
('qualifiesforexam', qualifiesforexam.App),
('manage_tags', manage_tags.App),
]
id = 'devilry_admin_periodadmin'
rolefrontpage_appname = 'overview'
def get_rolequeryset(self):
return Period.objects.filter_user_is_admin(user=self.request.user)\
.order_by('-start_time')
def get_titletext_for_role(self, role):
"""
Get a short title briefly describing the given ``role``.
        Remember that the role is a Period.
"""
period = role
return period
@classmethod
def matches_urlpath(cls, urlpath):
return urlpath.startswith('/devilry_admin/period')
def __get_devilryrole_for_requestuser(self):
period = self.request.cradmin_role
devilryrole = PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period(
user=self.request.user,
period=period
)
if devilryrole is None:
raise ValueError('Could not find a devilryrole for request.user. This must be a bug in '
'get_rolequeryset().')
return devilryrole
def get_devilryrole_for_requestuser(self):
"""
Get the devilryrole for the requesting user on the current
period (request.cradmin_instance).
        The return value is the same as for
        :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,
        except that this method raises ValueError if it does not find a role.
"""
if not hasattr(self, '_devilryrole_for_requestuser'):
self._devilryrole_for_requestuser = self.__get_devilryrole_for_requestuser()
return self._devilryrole_for_requestuser
| 40.272727 | 112 | 0.687133 | [
"BSD-3-Clause"
] | aless80/devilry-django | devilry/devilry_admin/views/period/crinstance_period.py | 4,430 | Python |
import ref_bot.cog.articlerefs
import importlib
import conf
from discord.ext import commands
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def setup_dbsession():
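    # Reads the [sqlalchemy] section of the ini config. The value shown here is only
    # an illustration of the expected shape, e.g.
    #   [sqlalchemy]
    #   connection_string = sqlite:///ref_bot.db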
engine = create_engine(conf.ini_config.get('sqlalchemy', 'connection_string'))
sessionm = sessionmaker()
sessionm.configure(bind=engine)
return sessionm()
def setup(bot):
print('ref_bot extension loading.')
dbsession = setup_dbsession()
importlib.reload(ref_bot.cog.articlerefs)
bot.remove_command('help')
bot.add_cog(ref_bot.cog.articlerefs.ArticleRefs(bot, dbsession))
def teardown(bot):
print('ref_bot extension unloading')
bot.remove_cog('ArticleRefs')
| 25.321429 | 86 | 0.750353 | [
"MIT"
] | tser0f/ref_bot | ref_bot/extension.py | 709 | Python |
# qubit number=4
# total number=42
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=10
prog.h(input_qubit[3]) # number=39
prog.cz(input_qubit[0],input_qubit[3]) # number=40
prog.h(input_qubit[3]) # number=41
prog.cx(input_qubit[0],input_qubit[3]) # number=33
prog.x(input_qubit[3]) # number=34
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.cx(input_qubit[0],input_qubit[3]) # number=25
prog.cx(input_qubit[0],input_qubit[3]) # number=12
prog.h(input_qubit[2]) # number=30
prog.cz(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[2]) # number=32
prog.x(input_qubit[2]) # number=21
prog.h(input_qubit[2]) # number=36
prog.cz(input_qubit[0],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=38
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[1],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=18
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=26
prog.cz(input_qubit[3],input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=28
prog.cx(input_qubit[3],input_qubit[0]) # number=14
prog.y(input_qubit[2]) # number=29
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2449.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 35.648 | 165 | 0.656867 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | benchmark/startQiskit_QC2449.py | 4,456 | Python |
# from nonbonded.cli.project.project import project
#
# __all__ = [project]
| 19 | 51 | 0.75 | [
"MIT"
] | SimonBoothroyd/nonbonded | nonbonded/cli/projects/__init__.py | 76 | Python |
# Copyright 2022 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pkg.apis.manager.v1beta1.python import api_pb2 as api
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class Trial(object):
def __init__(self, name, assignments, target_metric, metric_name, additional_metrics):
self.name = name
self.assignments = assignments
self.target_metric = target_metric
self.metric_name = metric_name
self.additional_metrics = additional_metrics
@staticmethod
def convert(trials):
res = []
for trial in trials:
if trial.status.condition == api.TrialStatus.TrialConditionType.SUCCEEDED:
new_trial = Trial.convertTrial(trial)
if new_trial is not None:
                    res.append(new_trial)
return res
@staticmethod
def convertTrial(trial):
assignments = []
for assignment in trial.spec.parameter_assignments.assignments:
assignments.append(Assignment.convert(assignment))
metric_name = trial.spec.objective.objective_metric_name
target_metric, additional_metrics = Metric.convert(
trial.status.observation, metric_name)
# If the target_metric is none, ignore the trial.
if target_metric is not None:
trial = Trial(trial.name, assignments, target_metric,
metric_name, additional_metrics)
return trial
return None
def __str__(self):
if self.name is None:
return "Trial(assignment: {})".format(", ".join([str(e) for e in self.assignments]))
else:
return "Trial(assignment: {}, metric_name: {}, metric: {}, additional_metrics: {})".format(
", ".join([str(e) for e in self.assignments]),
self.metric_name, self.target_metric,
", ".join(str(e) for e in self.additional_metrics))
class Assignment(object):
def __init__(self, name, value):
self.name = name
self.value = value
@staticmethod
def convert(assignment):
return Assignment(assignment.name, assignment.value)
@staticmethod
def generate(list_of_assignments):
res = []
for assignments in list_of_assignments:
buf = []
for assignment in assignments:
buf.append(
api.ParameterAssignment(name=assignment.name, value=str(assignment.value)))
rt = api.GetSuggestionsReply.ParameterAssignments(
assignments=buf)
res.append(rt)
return res
def __str__(self):
return "Assignment(name={}, value={})".format(self.name, self.value)
class Metric(object):
def __init__(self, name, value):
self.name = name
self.value = value
@staticmethod
def convert(observation, target):
metric = None
additional_metrics = []
for m in observation.metrics:
if m.name == target:
metric = Metric(m.name, m.value)
else:
additional_metrics.append(Metric(m.name, m.value))
return metric, additional_metrics
def __str__(self):
return "Metric(name={}, value={})".format(self.name, self.value)
| 35.036364 | 103 | 0.640114 | [
"Apache-2.0"
] | a9p/katib | pkg/suggestion/v1beta1/internal/trial.py | 3,854 | Python |
import io
from sdk.db import database
import numpy as np
import matplotlib.pyplot as plt
from numba import jit
from utils import logger
semester_order = ['FA', 'WI', 'SP', 'SU']
def get_section(x):
return (
x.get('semester'),
x.get('year')
)
def filter_function(sections):
d = {}
for x in sections:
if x not in d.keys():
d.update({x: 1})
else:
d[x] += 1
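    # Sort chronologically: the year is the primary key, and the semester's position in
    # semester_order acts as a fractional tie-breaker within the same year.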
return sorted(list(d.items()), key=lambda k: k[0][1] + semester_order.index(k[0][0]) / 10)
def get_plot(sc, cn, isQuarter=True) -> io.BytesIO:
query = database.get_query()
database.isQuarter(isQuarter, query)
database.subject_code(sc, query)
database.course_number(cn, query)
q = database.execute(query)
q = filter_function(map(get_section, q))
keys = range(0, len(q))
vals = [x[1] for x in q]
buffer = io.BytesIO()
plt.plot(keys, vals)
plt.xticks(np.arange(len(q)), [f'{x[0][0]} \'{x[0][1]}' for x in q])
plt.savefig(buffer, format='png')
buffer.seek(0)
plt.close()
return buffer | 24.111111 | 94 | 0.598157 | [
"Apache-2.0"
] | jzlotek/drexel-tms-parser | src/plotter.py | 1,085 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# capmetrics-etl documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 11 00:08:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'capmetrics-etl'
copyright = '2016, Julio Gonzalez Altamirano'
author = 'Julio Gonzalez Altamirano'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
version = '0.1.0'
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'capmetrics-etldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'capmetrics-etl.tex', 'capmetrics-etl Documentation',
'Julio Gonzalez Altamirano', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'capmetrics-etl', 'capmetrics-etl Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'capmetrics-etl', 'capmetrics-etl Documentation',
author, 'capmetrics-etl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.452962 | 79 | 0.719025 | [
"MIT"
] | jga/capmetrics-etl | docs/conf.py | 9,314 | Python |
import string
from app.chatterbot import languages
from spacy.lang.zh import Chinese
class LowercaseTagger(object):
"""
Returns the text in lowercase.
"""
def __init__(self, language=None):
self.language = language or languages.ENG
def get_text_index_string(self, text):
return text.lower()
class PosLemmaTagger(object):
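    """
    Returns a string of lowercase lemmas for the meaningful tokens in a text,
    using spaCy (or the plain Chinese tokenizer for 'zh').
    """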
def __init__(self, language=None):
import spacy
self.language = language or languages.ENG
punc = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
# punc = punc.decode("utf-8")
self.punctuation_table = str.maketrans(dict.fromkeys(string.punctuation + punc))
language = self.language.ISO_639_1.lower()
if language == 'zh':
self.nlp = Chinese()
else:
self.nlp = spacy.load(language)
def get_text_index_string(self, text):
"""
Return a string of text containing part-of-speech, lemma pairs.
"""
if len(text) <= 2:
text_without_punctuation = text.translate(self.punctuation_table)
if len(text_without_punctuation) >= 1:
text = text_without_punctuation
document = self.nlp(text)
if len(text) <= 2:
bigram_pairs = [
token.lemma_.lower() for token in document
]
tokens = [ele for ele in bigram_pairs]
else:
tokens = [
token for token in document if token.is_alpha and not token.is_stop
]
if len(tokens) < 2:
tokens = [
token for token in document if token.is_alpha
]
tokens = [token.lemma_.lower() for token in tokens]
# if not bigram_pairs:
# bigram_pairs = [
# token.lemma_.lower() for token in document
# ]
#
# return ' '.join(bigram_pairs)
return ' '.join(tokens)
| 30.5 | 91 | 0.535519 | [
"BSD-3-Clause"
] | Jack2313/WeChatterBot | app/chatterbot/tagging.py | 2,159 | Python |
import torch_xla.test.test_utils as test_utils
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla.core.xla_model as xm
import torch_xla.utils.utils as xu
import torch_xla.distributed.parallel_loader as pl
import torch_xla.debug.metrics as met
import torch_xla
import torchvision.transforms as transforms
import torchvision
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import sys
import os
import webdataset as wds
import datetime
import time
# import warnings
# warnings.filterwarnings("ignore")
from itertools import islice
import torch_xla.debug.profiler as xp
# profiler_port=9012
for extra in ('/usr/share/torch-xla-1.7/pytorch/xla/test', '/pytorch/xla/test', '/usr/share/pytorch/xla/test'):
if os.path.exists(extra):
sys.path.insert(0, extra)
import schedulers
# import gcsdataset
import args_parse # XLA arg parser
# import argparse # py arg parser
# parser = argparse.ArgumentParser(description='WebDataset args for modified XLA model')
# parser.add_argument('--wds_traindir', type=str, default='/tmp/imagenet')
# parser.add_argument('--wds_testdir', type=str, default='/tmp/imagenet')
# parser.add_argument('--trainsize', type=int, default=1280000)
# parser.add_argument('--testsize', type=int, default=50000)
# wds_args, others = parser.parse_known_args()
SUPPORTED_MODELS = [
'alexnet', 'densenet121', 'densenet161', 'densenet169', 'densenet201',
'inception_v3', 'resnet101', 'resnet152', 'resnet18', 'resnet34',
'resnet50', 'squeezenet1_0', 'squeezenet1_1', 'vgg11', 'vgg11_bn', 'vgg13',
'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn'
]
MODEL_OPTS = {
'--model': {
'choices': SUPPORTED_MODELS,
'default': 'resnet50',
},
'--test_set_batch_size': {
'type': int,
},
'--lr_scheduler_type': {
'type': str,
},
'--lr_scheduler_divide_every_n_epochs': {
'type': int,
},
'--lr_scheduler_divisor': {
'type': int,
},
'--dataset': {
'choices': ['gcsdataset', 'torchdataset'],
'default': 'gcsdataset',
'type': str,
},
}
# '--wds_traindir': {
# 'type': str,
# 'default':'/tmp/imagenet'
# },
# '--wds_testdir': {
# 'type': str,
# 'default': '/tmp/imagenet'
# },
# '--trainsize': {
# 'type': int,
# 'default': 1280000
# },
# '--testsize': {
# 'type': int,
# 'default': 50000
# },
FLAGS = args_parse.parse_common_options(
datadir='/tmp/imagenet',
batch_size=None,
num_epochs=None,
momentum=None,
lr=None,
target_accuracy=None,
opts=MODEL_OPTS.items(),
profiler_port=9012,
)
DEFAULT_KWARGS = dict(
batch_size=128,
test_set_batch_size=64,
num_epochs=18,
momentum=0.9,
lr=0.1,
target_accuracy=0.0,
)
MODEL_SPECIFIC_DEFAULTS = {
# Override some of the args in DEFAULT_KWARGS, or add them to the dict
# if they don't exist.
'resnet50':
dict(
DEFAULT_KWARGS, **{
'lr': 0.5,
'lr_scheduler_divide_every_n_epochs': 20,
'lr_scheduler_divisor': 5,
'lr_scheduler_type': 'WarmupAndExponentialDecayScheduler',
})
}
# Set any args that were not explicitly given by the user.
default_value_dict = MODEL_SPECIFIC_DEFAULTS.get(FLAGS.model, DEFAULT_KWARGS)
for arg, value in default_value_dict.items():
if getattr(FLAGS, arg) is None:
setattr(FLAGS, arg, value)
def get_model_property(key):
default_model_property = {
'img_dim': 224,
'model_fn': getattr(torchvision.models, FLAGS.model)
}
model_properties = {
'inception_v3': {
'img_dim': 299,
'model_fn': lambda: torchvision.models.inception_v3(aux_logits=False)
},
}
model_fn = model_properties.get(FLAGS.model, default_model_property)[key]
return model_fn
def _train_update(device, step, loss, tracker, epoch, writer):
test_utils.print_training_update(
device,
step,
loss.item(),
tracker.rate(),
tracker.global_rate(),
epoch,
summary_writer=writer)
##### WDS ########
# trainsize = 1281167 # all shards
trainsize = 1280000 #FLAGS.trainsize # 1280 shards {000...079}
testsize = 50000 # FLAGS.testsize
# train_dir = FLAGS.wds_traindir
# test_dir = FLAGS.wds_testdir
def identity(x):
return x
def my_worker_splitter(urls):
"""Split urls per worker
Selects a subset of urls based on Torch get_worker_info.
Used as a shard selection function in Dataset.
replaces wds.split_by_worker"""
# import torch
urls = [url for url in urls]
assert isinstance(urls, list)
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
wid = worker_info.id
num_workers = worker_info.num_workers
return urls[wid::num_workers]
else:
return urls
def my_node_splitter(urls):
"""Split urls_ correctly per accelerator node
:param urls:
:return: slice of urls_
"""
rank=xm.get_ordinal()
num_replicas=xm.xrt_world_size()
urls_this = urls[rank::num_replicas]
return urls_this
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def make_train_loader(img_dim, shuffle=10000, batch_size=FLAGS.batch_size):
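    # Build the WebDataset training pipeline; shards are split across TPU cores and DataLoader workers.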
# "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-train-{000000..001281}.tar"
# "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-train-{000000..001279}.tar"
# "pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-train-{000000..001281}.tar"
# "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-320/imagenet-train-{000000..000320}.tar"
# "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-640/imagenet-train-{000000..000639}.tar"
num_dataset_instances = xm.xrt_world_size() * FLAGS.num_workers
epoch_size = trainsize // num_dataset_instances
# num_batches = (epoch_size + batch_size - 1) // batch_size
# num_batches = epoch_size // batch_size
image_transform = transforms.Compose(
[
transforms.RandomResizedCrop(img_dim),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
dataset = (
wds.WebDataset("pipe:cat /mnt/disks/dataset/webdataset/shards-640/imagenet-train-{000000..000639}.tar", # FLAGS.wds_traindir,
splitter=my_worker_splitter, nodesplitter=my_node_splitter, shardshuffle=True, length=epoch_size)
.shuffle(shuffle)
.decode("pil")
.to_tuple("ppm;jpg;jpeg;png", "cls")
.map_tuple(image_transform, identity)
.batched(batch_size, partial=True)
)
loader = torch.utils.data.DataLoader(dataset, batch_size=None, shuffle=False, drop_last=False, num_workers=FLAGS.num_workers) # , worker_init_fn=worker_init_fn
return loader
def make_val_loader(img_dim, resize_dim, batch_size=FLAGS.test_set_batch_size):
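    # Validation pipeline: deterministic resize/center-crop, no shuffling, shards split the same way as training.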
num_dataset_instances = xm.xrt_world_size() * FLAGS.num_workers
epoch_test_size = testsize // num_dataset_instances
# num_batches = (epoch_size + batch_size - 1) // batch_size
# num_test_batches = epoch_test_size // batch_size
val_transform = transforms.Compose(
[
transforms.Resize(resize_dim),
transforms.CenterCrop(img_dim),
transforms.ToTensor(),
normalize,
]
)
# "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-320/imagenet-val-{000000..000012}.tar"
# "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-val-{000000..000049}.tar"
# "pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-val-{000000..000049}.tar"
# "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-640/imagenet-val-{000000..000024}.tar"
val_dataset = (
wds.WebDataset("pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-val-{000000..000049}.tar", # FLAGS.wds_testdir,
splitter=my_worker_splitter, nodesplitter=my_node_splitter, shardshuffle=False, length=epoch_test_size)
.decode("pil")
.to_tuple("ppm;jpg;jpeg;png", "cls")
.map_tuple(val_transform, identity)
.batched(batch_size, partial=True)
)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=None, shuffle=False, num_workers=FLAGS.num_workers) # , worker_init_fn=worker_init_fn, pin_memory=False
return val_loader
def train_imagenet():
print('==> Preparing data..')
img_dim = get_model_property('img_dim')
resize_dim = max(img_dim, 256)
train_loader = make_train_loader(img_dim, batch_size=FLAGS.batch_size, shuffle=10000)
test_loader = make_val_loader(img_dim, resize_dim, batch_size=FLAGS.test_set_batch_size)
torch.manual_seed(42)
server = xp.start_server(FLAGS.profiler_port)
device = xm.xla_device()
model = get_model_property('model_fn')().to(device)
writer = None
if xm.is_master_ordinal():
writer = test_utils.get_summary_writer(FLAGS.logdir)
optimizer = optim.SGD(
model.parameters(),
lr=FLAGS.lr,
momentum=FLAGS.momentum,
weight_decay=1e-4)
num_training_steps_per_epoch = trainsize // (
FLAGS.batch_size * xm.xrt_world_size())
lr_scheduler = schedulers.wrap_optimizer_with_scheduler(
optimizer,
scheduler_type=getattr(FLAGS, 'lr_scheduler_type', None),
scheduler_divisor=getattr(FLAGS, 'lr_scheduler_divisor', None),
scheduler_divide_every_n_epochs=getattr(
FLAGS, 'lr_scheduler_divide_every_n_epochs', None),
num_steps_per_epoch=num_training_steps_per_epoch,
summary_writer=writer)
loss_fn = nn.CrossEntropyLoss()
# global_step = 0
# server = xp.start_server(profiler_port)
def train_loop_fn(loader, epoch):
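        # One training epoch: forward/backward per batch, optimizer step through XLA, throughput tracking.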
train_steps = trainsize // (FLAGS.batch_size * xm.xrt_world_size())
tracker = xm.RateTracker()
total_samples = 0
rate_list = []
model.train()
for step, (data, target) in enumerate(loader): # repeatedly(loader) | enumerate(islice(loader, 0, train_steps))
# global_step += 1
optimizer.zero_grad()
output = model(data)
loss = loss_fn(output, target)
loss.backward()
xm.optimizer_step(optimizer)
tracker.add(FLAGS.batch_size)
total_samples += data.size()[0]
# rate_list.append(tracker.rate())
# replica_rate = tracker.rate()
# global_rate = tracker.global_rate()
if lr_scheduler:
lr_scheduler.step()
if step % FLAGS.log_steps == 0:
xm.add_step_closure(
_train_update, args=(device, step, loss, tracker, epoch, writer))
test_utils.write_to_summary(writer, step, dict_to_write={'Rate_step': tracker.rate()}, write_xla_metrics=False)
if step == train_steps:
break
# replica_max_rate = np.max(tracker.rate())
reduced_global = xm.mesh_reduce('reduced_global', tracker.global_rate(), np.mean)
# reduced_max_rate = xm.mesh_reduce('max_rate', tracker.rate(), np.mean)
return total_samples, reduced_global
def test_loop_fn(loader, epoch):
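        # One validation pass: top-1 accuracy on this replica, then averaged across replicas via mesh_reduce.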
test_steps = testsize // (FLAGS.test_set_batch_size * xm.xrt_world_size())
total_samples, correct = 0, 0
model.eval()
for step, (data, target) in enumerate(loader): # repeatedly(loader) | enumerate(islice(loader, 0, test_steps)
output = model(data)
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum()
total_samples += data.size()[0]
if step % FLAGS.log_steps == 0:
xm.add_step_closure(
test_utils.print_test_update, args=(device, None, epoch, step))
if step == test_steps:
break
correct_val = correct.item()
accuracy_replica = 100.0 * correct_val / total_samples
accuracy = xm.mesh_reduce('test_accuracy', accuracy_replica, np.mean)
return accuracy, accuracy_replica, total_samples
train_device_loader = pl.MpDeviceLoader(train_loader, device)
test_device_loader = pl.MpDeviceLoader(test_loader, device)
accuracy, max_accuracy = 0.0, 0.0
training_start_time = time.time()
for epoch in range(1, FLAGS.num_epochs + 1):
xm.master_print('Epoch {} train begin {}'.format(
epoch, test_utils.now()))
replica_epoch_start = time.time()
replica_train_samples, reduced_global = train_loop_fn(train_device_loader, epoch)
replica_epoch_time = time.time() - replica_epoch_start
avg_epoch_time_mesh = xm.mesh_reduce('epoch_time', replica_epoch_time, np.mean)
reduced_global = reduced_global * xm.xrt_world_size()
xm.master_print('Epoch {} train end {}, Epoch Time={}, Replica Train Samples={}, Reduced GlobalRate={:.2f}'.format(
epoch, test_utils.now(), str(datetime.timedelta(seconds=avg_epoch_time_mesh)).split('.')[0], replica_train_samples, reduced_global))
accuracy, accuracy_replica, replica_test_samples = test_loop_fn(test_device_loader, epoch)
xm.master_print('Epoch {} test end {}, Reduced Accuracy={:.2f}%, Replica Accuracy={:.2f}%, Replica Test Samples={}'.format(
epoch, test_utils.now(), accuracy, accuracy_replica, replica_test_samples))
max_accuracy = max(accuracy, max_accuracy)
test_utils.write_to_summary(
writer,
epoch,
dict_to_write={'Accuracy/test': accuracy,
'Global Rate': reduced_global},
write_xla_metrics=False)
if FLAGS.metrics_debug:
xm.master_print(met.metrics_report())
test_utils.close_summary_writer(writer)
total_train_time = time.time() - training_start_time
xm.master_print('Total Train Time: {}'.format(str(datetime.timedelta(seconds=total_train_time)).split('.')[0]))
xm.master_print('Max Accuracy: {:.2f}%'.format(max_accuracy))
xm.master_print('Avg. Global Rate: {:.2f} examples per second'.format(reduced_global))
return max_accuracy
def _mp_fn(index, flags):
global FLAGS
FLAGS = flags
torch.set_default_tensor_type('torch.FloatTensor')
accuracy = train_imagenet()
if accuracy < FLAGS.target_accuracy:
print('Accuracy {} is below target {}'.format(accuracy,
FLAGS.target_accuracy))
sys.exit(21)
if __name__ == '__main__':
xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=FLAGS.num_cores, start_method='fork') # , start_method='spawn'
| 37.169951 | 173 | 0.653701 | [
"Apache-2.0"
] | mlexample/gcspytorchimagenet | test_train_mp_wds_local.py | 15,091 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoinlimitededition Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
from test_framework.test_framework import BitcoinlimitededitionTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import http.client
import urllib.parse
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
#allows simple http get calls
def http_get_call(host, port, path, response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read().decode('utf-8')
#allows simple http post calls with a request body
def http_post_call(host, port, path, requestdata = '', response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('POST', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinlimitededitionTestFramework):
FORMAT_SEPARATOR = "."
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-rest"]] * self.num_nodes
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
#######################################
# GETUTXOS: query an unspent outpoint #
#######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
#################################################
# GETUTXOS: now query an already spent outpoint #
#################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is no utxo in the response because this oupoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += hex_str_to_bytes(txid)
binaryRequest += pack("i", n)
binaryRequest += hex_str_to_bytes(vintx)
binaryRequest += pack("i", 0)
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = BytesIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #no utxo should be returned because the tx is unconfirmed and checkmempool was not used
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it has just added to the mempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
assert_equal(response.status, 400) #must be a 400 because we send an invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 400) #must be a 400 because we exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 200) #must be a 200 because we are within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str, parse_float=Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for i, tx in enumerate(txs):
assert_equal(tx in json_obj, True)
assert_equal(json_obj[tx]['spentby'], txs[i+1:i+2])
assert_equal(json_obj[tx]['depends'], txs[i-1:i])
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
| 46.118182 | 132 | 0.642355 | [
"MIT"
] | bitcoinlimitededition/bitcoinlimitededition | test/functional/interface_rest.py | 15,219 | Python |
import random
import turtle
import time
def menu():
x = input('would you like to start the game? \n (YES/NO) \n would you like to quit the menu bar? \n (QUIT) \n *PLEASE USE CAPITAL LETTERS \n YOUR ANSWER: ')
if x == 'NO' or x == 'QUIT':
quit()
elif x == 'YES':
print('')
menu()
print('are you MALE/FEMALE ? ')
print('*PLEASE USE CAPITAL LETTERS')
gender = input('ANSWER:')
#lists
box_color_list = ["box1.gif", "box2.gif", "box3.gif", "box4.gif", "box5.gif"]
background_list = ["background1.gif", "background2.gif", "background3.gif", "background4.gif"]
randombox = random.randint (0, len(box_color_list)-1)
this_box = box_color_list[randombox]
box = turtle.clone()
turtle.register_shape(this_box)
box.shape(this_box)
background = random.randint (0,4)
screen = turtle.Screen()
randbackground = random.randint (0,len(background_list)-1)
this_background = background_list [randbackground]
turtle.register_shape(this_background)
turtle.bgpic (this_background)
turtle.tracer(1, 0)
turtle2 = turtle.clone()
score = 0
turtle2.write(str(score))
turtle2.ht()
turtle.penup()
#bird = turtle.clone()
#turtle.addshape('bird.gif')
#bird.shape('bird.gif')
turtle.shape('circle')
#turtle.hideturtle()
turtle.Screen()
turtle.fillcolor('white')
screen = turtle.Screen()
screen.bgcolor('light blue')
turtle.goto(0,-200)
good_food_pos= []
boxes_list = []
bad_food_pos = []
good_food_stamps = []
bad_food_stamps = []
box_stamps = []
box_pos=[]
bird_pos=[]
turtles_list = []
SIZE_X = 400
SIZE_Y = 400
turtle.setup(500,500)
player_size = 10
my_pos = turtle.pos()
x_pos = my_pos[0]
y_pos = my_pos[1]
UP_EDGE = 200
DOWN_EDGE = -200
RIGHT_EDGE = 200
LEFT_EDGE = -200
UP_ARROW = 'Up'
LEFT_ARROW = 'Left'
DOWN_ARROW = 'Down'
RIGHT_ARROW = 'Right'
TIME_STEP = 100
TIME_STEP2 = 10000
SPACEBAR = 'space'
def move_player():
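    # Wrap the player to the opposite edge of the screen and handle any food at the new position.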
my_pos = turtle.pos()
x_pos = my_pos[0]
y_pos = my_pos[1]
x_ok = LEFT_EDGE <= x_pos <= RIGHT_EDGE
y_ok = UP_EDGE >= y_pos >= DOWN_EDGE
within_bounds = x_ok and y_ok
if turtle.pos()[0] == RIGHT_EDGE:
turtle.goto (LEFT_EDGE + 20,turtle.pos()[1])
if turtle.pos()[0] == LEFT_EDGE :
turtle.goto (RIGHT_EDGE - 20,turtle.pos()[1])
####'''
#### if x_pos >= RIGHT_EDGE:
#### turtle.goto(RIGHT_EDGE - 10, y_pos)
#### if x_pos <= LEFT_EDGE:
#### turtle.goto(LEFT_EDGE + 10, y_pos)
#### if y_pos >= UP_EDGE:
#### turtle.goto(x_pos, UP_EDGE + 10)
####'''
## if within_bounds:
## if direction == RIGHT:
## turtle.goto(x_pos + 10,y_pos)
## elif direction == LEFT:
## turtle.goto(x_pos - 10,y_pos)
## elif direction == UP:
## turtle.goto(x_pos, y_pos +10)
##
## #if turtle.pos() == my_clone.pos():
##
##
## '''
## else:
## # x checks
## # right edge check
## if x_pos >= RIGHT_EDGE:
## if direction == LEFT:
## turtle.goto(x_pos - 1,y_pos)
## if x_pos <= LEFT_EDGE:
## if direction == RIGHT:
## turtle.goto(x_pos + 1,y_pos)
##
## if y_pos >= UP_EDGE:
## if direction == RIGHT:
## turtle.goto(x_pos + 10,y_pos)
## elif direction == LEFT:
## turtle.goto(x_pos - 10, y_pos)
## elif direction == DOWN:
## turtle.goto(x_pos, y_pos -10)
##
## if y_pos <= DOWN_EDGE:
## if direction == RIGHT:
## turtle.goto(x_pos + 10,y_pos)
## elif direction == LEFT:
## turtle.goto(x_pos - 10, y_pos)
## elif direction == UP:
## turtle.goto(x_pos, y_pos + 10)
## '''
global food,score
#turtle.ontimer(move_player,TIME_STEP)
if turtle.pos() in good_food_pos:
good_food_ind = good_food_pos.index(turtle.pos())
food.clearstamp(good_food_stamps[good_food_ind])
good_food_stamps.pop(good_food_ind)
good_food_pos.pop(good_food_ind)
print('EATEN GOOD FOOD!')
score = score + 1
turtle2.clear()
turtle2.write(str(score))
good_food()
if turtle.pos() in bad_food_pos:
bad_food_ind = bad_food_pos.index(turtle.pos())
bad_food.clearstamp(bad_food_stamps[bad_food_ind])
bad_food_stamps.pop(bad_food_ind)
bad_food_pos.pop(bad_food_ind)
print('EATEN BAD FOOD!')
score = score - 1
turtle2.clear()
turtle2.write(str(score))
if score == -5:
print('GAME OVER!')
quit()
bad_food1()
UP = 0
LEFT = 1
DOWN = 2
RIGHT = 3
direction = DOWN
turtle.register_shape('man_right.gif')
turtle.register_shape('man_left.gif')
turtle.register_shape('woman_right.gif')
turtle.register_shape('woman_left.gif')
if gender == "MALE" :
turtle.shape('man_right.gif')
else:
turtle.shape('woman_right.gif')
def left():
global direction
direction = LEFT
if gender == "MALE" :
turtle.shape('man_left.gif')
else:
turtle.shape('woman_left.gif')
move_player()
print('you pressed the left key')
def right():
global direction
direction = RIGHT
if gender == "MALE" :
turtle.shape('man_right.gif')
else:
turtle.shape('woman_right.gif')
move_player()
print('you pressed the right key')
turtle.onkeypress(left, LEFT_ARROW)
turtle.onkeypress(right, RIGHT_ARROW)
turtle.listen()
good_pos = (0,0) ##
food = turtle.clone()
food.shape('square')
food.fillcolor('green')
food.hideturtle()
def good_food():
min_x=-int(SIZE_X/2/player_size)+1
max_x=int(SIZE_X/2/player_size)-1
food_x = random.randint(min_x,max_x)*player_size
food.goto(food_x,turtle.pos()[1])
good_food_pos.append(food.pos())
stampnew = food.stamp()
#stamp_old = food_stamps[-1]
good_food_stamps.append(stampnew)
def create_box():
global y_pos,box,SIZE_X,player_size
top_y = 300
min_x=-int(SIZE_X/2/player_size)+1
max_x=int(SIZE_X/2/player_size)-1
x = random.randint(min_x,max_x)*player_size
turtles_list.append(turtle.clone())
turtles_list[-1].hideturtle()
turtles_list[-1].shape("square")
turtles_list[-1].fillcolor('red')
turtles_list[-1].goto(x,top_y)
turtles_list[-1].showturtle()
min_x=-int(SIZE_X/2/player_size)+1
max_x=int(SIZE_X/2/player_size)-1
x = random.randint(min_x,max_x)*player_size
turtles_list[-1].goto(x,top_y)
turtles_list[-1].showturtle()
chose_number()
#box.goto(x,y_pos)
#box.goto(x,260)
#box.addshape('box.gif')
#box.shape('box.gif')
#all_way = 510
count = 0
def fall():
global turtles_list,top_y,x_pos,turtle,count
for my_clone in turtles_list:
x1 = my_clone.pos()[0]
y1 = my_clone.pos()[1]
if y1 > turtle.pos()[1]:
y1 = y1 -25
#x1 = x_pos
my_clone.goto(x1,y1)
count += 1
print(count)
if count%100==0:
num_box = count//100
for i in range(num_box):
create_box()
#for num_box in :
#create_box()
#turtle.ontimer(create_box,TIME_STEP2)
turtle.ontimer(fall,TIME_STEP)
def jump():
global direction,x_pos,y_pos,my_pos,y1
if direction == UP:
turtle.goto(turtle.pos()[0],turtle.pos()[1] + 20)
for my_turtle in turtles_list:
            if turtle.pos() == my_turtle.pos():
                turtle.goto(turtle.pos()[0],y1)
if not turtle.pos() == my_clone.pos():
turtle.goto(turtle.pos()[0],turtle.pos()[1] - 20)
def chose_number():
number_of_boxes=random.randint(1,3)
for i in range (number_of_boxes):
x5 = turtle.clone()
x5.shape("square")
boxes_list.append(x5)
for g in boxes_list:
g.goto(random.randint(-200,200),200)
bad_pos = (0,0)
bad_food = turtle.clone()
bad_food.shape('square')
bad_food.fillcolor('black')
bad_food.hideturtle()
def bad_food1():
global SIZE_X,player_size,y_pos,bad_food
min_x=-int(SIZE_X/2/player_size)+1
max_x=int(SIZE_X/2/player_size)-1
bad_food_x = random.randint(min_x,max_x)*player_size
bad_food.goto(bad_food_x,y_pos)
bad_food_pos.append(bad_food.pos())
bad_stamp_new = bad_food.stamp()
#stamp_old = food_stamps[-1]
bad_food_stamps.append(bad_stamp_new)
my_clone = turtle.clone()
my_clone.ht()
bad_food1()
good_food()
move_player()
create_box()
fall()
if turtle.pos() in box_pos:
print("YOU LOST !")
quit()
| 25.291908 | 160 | 0.601531 | [
"MIT"
] | idane19-meet/EAT_IT | jennifer & Laith/laith_eatit.py | 8,751 | Python |
import math
n = int(input("team:"))
# Permutations of the top three places: n! / (n - 3)!
S = math.factorial(n) // math.factorial(n - 3)
# Orderings of all n teams: n!
D = math.factorial(n)
print("top places: " + str(S))
print("all places: " + str(D))
| 21.857143 | 40 | 0.679739 | [
"MIT"
] | brickdonut/2019-fall-polytech-cs | 2z.py | 153 | Python |
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
import torch.nn.functional as F
class MLP(nn.Module):
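    """Plain fully connected network: Linear layers with ReLU between them."""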
def __init__(self, input_size, hidden_layers, out_size):
super(MLP, self).__init__()
self.sizes = [input_size] + hidden_layers + [out_size]
self.linears = [nn.Linear(in_dim, out_dim, True) for in_dim, out_dim in zip(self.sizes[: -1], self.sizes[1:])]
self.linears = nn.ModuleList(self.linears)
self.weight_init()
def forward(self, x):
for layer in self.linears[:-1]:
x = F.relu(layer(x))
x = self.linears[-1](x)
return x
def weight_init(self):
for layer in self.linears:
            torch.nn.init.xavier_uniform_(layer.weight)
torch.nn.init.zeros_(layer.bias)
class MLP_bn(nn.Module):
def __init__(self, input_size, hidden_layers, out_size):
super(MLP_bn, self).__init__()
self.sizes = [input_size] + hidden_layers + [out_size]
self.linears = [nn.Sequential(nn.Linear(in_dim, out_dim, True), nn.BatchNorm1d(out_dim)) for in_dim, out_dim in zip(self.sizes[: -1], self.sizes[1:])]
self.linears = nn.ModuleList(self.linears)
self.weight_init()
def forward(self, x):
for layer in self.linears[:-1]:
x = F.relu(layer(x))
x = self.linears[-1][0](x)
return x
def weight_init(self):
for layer in self.linears:
            torch.nn.init.xavier_uniform_(layer[0].weight)
torch.nn.init.zeros_(layer[0].bias)
class MLP_drop(nn.Module):
def __init__(self, input_size, hidden_layers, out_size):
super(MLP_drop, self).__init__()
self.sizes = [input_size] + hidden_layers + [out_size]
self.linears = [nn.Sequential(nn.Linear(in_dim, out_dim, True), nn.Dropout(0.5)) for in_dim, out_dim in zip(self.sizes[: -1], self.sizes[1:])]
self.linears = nn.ModuleList(self.linears)
self.weight_init()
def forward(self, x):
for layer in self.linears[:-1]:
x = F.relu(layer(x))
x = self.linears[-1][0](x)
return x
def weight_init(self):
for layer in self.linears:
            torch.nn.init.xavier_uniform_(layer[0].weight)
torch.nn.init.zeros_(layer[0].bias)
def train_nn(model, data, num_epoch=5000):
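    # `data` must expose Xtrain/Ytrain/Xtest/Ytest arrays; returns the test loss recorded after each epoch.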
train_dataset = TensorDataset(torch.Tensor(data.Xtrain), torch.Tensor(data.Ytrain))
train_dataloader = DataLoader(dataset=train_dataset, batch_size=128, shuffle=True)
test_dataset = TensorDataset(torch.Tensor(data.Xtest), torch.Tensor(data.Ytest))
test_dataloader = DataLoader(dataset=test_dataset, batch_size=128)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
losses = []
for epoch in range(num_epoch):
for inputs, targets in train_dataloader:
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
model.eval()
loss = 0.
for inputs, targets in test_dataloader:
outputs = model(inputs)
loss += criterion(outputs, targets).data
        losses.append(loss.data / len(test_dataloader))
model.train()
return losses
| 36.966667 | 158 | 0.6318 | [
"MIT"
] | yunndlalala/MCS-project | nn_model.py | 3,327 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ['baidu_download']
from ..common import *
from .embed import *
from .universal import *
def baidu_get_song_data(sid):
data = json.loads(get_html(
'http://music.baidu.com/data/music/fmlink?songIds=%s' % sid, faker=True))['data']
if data['xcode'] != '':
# inside china mainland
return data['songList'][0]
else:
# outside china mainland
return None
def baidu_get_song_url(data):
return data['songLink']
def baidu_get_song_artist(data):
return data['artistName']
def baidu_get_song_album(data):
return data['albumName']
def baidu_get_song_title(data):
return data['songName']
def baidu_get_song_lyric(data):
lrc = data['lrcLink']
return "http://music.baidu.com%s" % lrc if lrc else None
def baidu_download_song(sid, output_dir='.', merge=True, info_only=False):
data = baidu_get_song_data(sid)
if data is not None:
url = baidu_get_song_url(data)
title = baidu_get_song_title(data)
artist = baidu_get_song_artist(data)
album = baidu_get_song_album(data)
lrc = baidu_get_song_lyric(data)
file_name = "%s - %s - %s" % (title, album, artist)
else:
html = get_html("http://music.baidu.com/song/%s" % sid)
url = r1(r'data_url="([^"]+)"', html)
title = r1(r'data_name="([^"]+)"', html)
file_name = title
type, ext, size = url_info(url, faker=True)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], file_name, ext, size,
output_dir, merge=merge, faker=True)
try:
type, ext, size = url_info(lrc, faker=True)
print_info(site_info, title, type, size)
if not info_only:
download_urls([lrc], file_name, ext, size, output_dir, faker=True)
except:
pass
def baidu_download_album(aid, output_dir='.', merge=True, info_only=False):
html = get_html('http://music.baidu.com/album/%s' % aid, faker=True)
album_name = r1(r'<h2 class="album-name">(.+?)<\/h2>', html)
artist = r1(r'<span class="author_list" title="(.+?)">', html)
output_dir = '%s/%s - %s' % (output_dir, artist, album_name)
ids = json.loads(r1(r'<span class="album-add" data-adddata=\'(.+?)\'>',
html).replace('"', '').replace(';', '"'))['ids']
track_nr = 1
for id in ids:
song_data = baidu_get_song_data(id)
song_url = baidu_get_song_url(song_data)
song_title = baidu_get_song_title(song_data)
song_lrc = baidu_get_song_lyric(song_data)
file_name = '%02d.%s' % (track_nr, song_title)
type, ext, size = url_info(song_url, faker=True)
print_info(site_info, song_title, type, size)
if not info_only:
download_urls([song_url], file_name, ext, size,
output_dir, merge=merge, faker=True)
if song_lrc:
type, ext, size = url_info(song_lrc, faker=True)
print_info(site_info, song_title, type, size)
if not info_only:
download_urls([song_lrc], file_name, ext,
size, output_dir, faker=True)
track_nr += 1
def baidu_download(url, output_dir='.', stream_type=None, merge=True, info_only=False, **kwargs):
if re.match(r'https?://pan.baidu.com', url):
real_url, title, ext, size = baidu_pan_download(url)
print_info('BaiduPan', title, ext, size)
if not info_only:
print('Hold on...')
time.sleep(5)
download_urls([real_url], title, ext, size,
output_dir, url, merge=merge, faker=True)
elif re.match(r'http://music.baidu.com/album/\d+', url):
id = r1(r'http://music.baidu.com/album/(\d+)', url)
baidu_download_album(id, output_dir, merge, info_only)
    elif re.match(r'http://music.baidu.com/song/\d+', url):
id = r1(r'http://music.baidu.com/song/(\d+)', url)
baidu_download_song(id, output_dir, merge, info_only)
elif re.match('http://tieba.baidu.com/', url):
try:
# embedded videos
embed_download(url, output_dir, merge=merge, info_only=info_only, **kwargs)
except:
# images
html = get_html(url)
title = r1(r'title:"([^"]+)"', html)
vhsrc = re.findall(r'"BDE_Image"[^>]+src="([^"]+\.mp4)"', html) or \
re.findall(r'vhsrc="([^"]+)"', html)
if len(vhsrc) > 0:
ext = 'mp4'
size = url_size(vhsrc[0])
print_info(site_info, title, ext, size)
if not info_only:
download_urls(vhsrc, title, ext, size,
output_dir=output_dir, merge=False)
items = re.findall(
r'//imgsrc.baidu.com/forum/w[^"]+/([^/"]+)', html)
urls = ['http://imgsrc.baidu.com/forum/pic/item/' + i
for i in set(items)]
# handle albums
kw = r1(r'kw=([^&]+)', html) or r1(r"kw:'([^']+)'", html)
tid = r1(r'tid=(\d+)', html) or r1(r"tid:'([^']+)'", html)
album_url = 'http://tieba.baidu.com/photo/g/bw/picture/list?kw=%s&tid=%s&pe=%s' % (kw, tid, 1000)
album_info = json.loads(get_content(album_url))
for i in album_info['data']['pic_list']:
urls.append(
'http://imgsrc.baidu.com/forum/pic/item/' + i['pic_id'] + '.jpg')
ext = 'jpg'
size = float('Inf')
print_info(site_info, title, ext, size)
if not info_only:
download_urls(urls, title, ext, size,
output_dir=output_dir, merge=False)
def baidu_pan_download(url):
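    # Resolve a pan.baidu.com share link to a direct download URL, handling password-protected shares.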
errno_patt = r'errno":([^"]+),'
refer_url = ""
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Host': 'pan.baidu.com',
'Origin': 'http://pan.baidu.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2500.0 Safari/537.36',
'Referer': refer_url
}
if cookies:
print('Use user specified cookies')
else:
print('Generating cookies...')
fake_headers['Cookie'] = baidu_pan_gen_cookies(url)
refer_url = "http://pan.baidu.com"
html = get_content(url, fake_headers, decoded=True)
isprotected = False
sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
html)
if sign == None:
if re.findall(r'\baccess-code\b', html):
isprotected = True
sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, fake_headers, psk = baidu_pan_protected_share(
url)
# raise NotImplementedError("Password required!")
if isprotected != True:
raise AssertionError("Share not found or canceled: %s" % url)
if bdstoken == None:
bdstoken = ""
if isprotected != True:
sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
html)
request_url = "http://pan.baidu.com/api/sharedownload?sign=%s×tamp=%s&bdstoken=%s&channel=chunlei&clienttype=0&web=1&app_id=%s" % (
sign, timestamp, bdstoken, appid)
refer_url = url
post_data = {
'encrypt': 0,
'product': 'share',
'uk': uk,
'primaryid': primary_id,
'fid_list': '[' + fs_id + ']'
}
if isprotected == True:
post_data['sekey'] = psk
response_content = post_content(request_url, fake_headers, post_data, True)
errno = match1(response_content, errno_patt)
if errno != "0":
raise AssertionError(
"Server refused to provide download link! (Errno:%s)" % errno)
real_url = r1(r'dlink":"([^"]+)"', response_content).replace('\\/', '/')
title = r1(r'server_filename":"([^"]+)"', response_content)
assert real_url
type, ext, size = url_info(real_url, faker=True)
title_wrapped = json.loads('{"wrapper":"%s"}' % title)
title = title_wrapped['wrapper']
logging.debug(real_url)
return real_url, title, ext, size
def baidu_pan_parse(html):
sign_patt = r'sign":"([^"]+)"'
timestamp_patt = r'timestamp":([^"]+),'
appid_patt = r'app_id":"([^"]+)"'
bdstoken_patt = r'bdstoken":"([^"]+)"'
fs_id_patt = r'fs_id":([^"]+),'
uk_patt = r'uk":([^"]+),'
errno_patt = r'errno":([^"]+),'
primary_id_patt = r'shareid":([^"]+),'
sign = match1(html, sign_patt)
timestamp = match1(html, timestamp_patt)
appid = match1(html, appid_patt)
bdstoken = match1(html, bdstoken_patt)
fs_id = match1(html, fs_id_patt)
uk = match1(html, uk_patt)
primary_id = match1(html, primary_id_patt)
return sign, timestamp, bdstoken, appid, primary_id, fs_id, uk
def baidu_pan_gen_cookies(url, post_data=None):
from http import cookiejar
cookiejar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookiejar))
resp = opener.open('http://pan.baidu.com')
if post_data != None:
resp = opener.open(url, bytes(parse.urlencode(post_data), 'utf-8'))
return cookjar2hdr(cookiejar)
def baidu_pan_protected_share(url):
print('This share is protected by password!')
inpwd = input('Please provide unlock password: ')
inpwd = inpwd.replace(' ', '').replace('\t', '')
print('Please wait...')
post_pwd = {
'pwd': inpwd,
'vcode': None,
'vstr': None
}
from http import cookiejar
import time
cookiejar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookiejar))
resp = opener.open('http://pan.baidu.com')
resp = opener.open(url)
init_url = resp.geturl()
verify_url = 'http://pan.baidu.com/share/verify?%s&t=%s&channel=chunlei&clienttype=0&web=1' % (
init_url.split('?', 1)[1], int(time.time()))
refer_url = init_url
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Host': 'pan.baidu.com',
'Origin': 'http://pan.baidu.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2500.0 Safari/537.36',
'Referer': refer_url
}
opener.addheaders = dict2triplet(fake_headers)
pwd_resp = opener.open(verify_url, bytes(
parse.urlencode(post_pwd), 'utf-8'))
pwd_resp_str = ungzip(pwd_resp.read()).decode('utf-8')
pwd_res = json.loads(pwd_resp_str)
if pwd_res['errno'] != 0:
raise AssertionError(
'Server returned an error: %s (Incorrect password?)' % pwd_res['errno'])
pg_resp = opener.open('http://pan.baidu.com/share/link?%s' %
init_url.split('?', 1)[1])
content = ungzip(pg_resp.read()).decode('utf-8')
sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
content)
psk = query_cookiejar(cookiejar, 'BDCLND')
psk = parse.unquote(psk)
fake_headers['Cookie'] = cookjar2hdr(cookiejar)
return sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, fake_headers, psk
def cookjar2hdr(cookiejar):
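    # Serialize a CookieJar into a single "name=value;name=value" Cookie header string.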
cookie_str = ''
for i in cookiejar:
cookie_str = cookie_str + i.name + '=' + i.value + ';'
return cookie_str[:-1]
def query_cookiejar(cookiejar, name):
for i in cookiejar:
if i.name == name:
return i.value
def dict2triplet(dictin):
out_triplet = []
for i in dictin:
out_triplet.append((i, dictin[i]))
return out_triplet
site_info = "Baidu.com"
download = baidu_download
download_playlist = playlist_not_supported("baidu")
| 36.911585 | 140 | 0.591641 | [
"MIT"
] | 1156859110/you-get | src/you_get/extractors/baidu.py | 12,107 | Python |
# coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import dlxapi
from dlxapi.models.field_description_updated_event import FieldDescriptionUpdatedEvent # noqa: E501
from dlxapi.rest import ApiException
class TestFieldDescriptionUpdatedEvent(unittest.TestCase):
"""FieldDescriptionUpdatedEvent unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFieldDescriptionUpdatedEvent(self):
"""Test FieldDescriptionUpdatedEvent"""
# FIXME: construct object with mandatory attributes with example values
# model = dlxapi.models.field_description_updated_event.FieldDescriptionUpdatedEvent() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.902439 | 119 | 0.733595 | [
"MIT"
] | dlens/dlxapi | python/test/test_field_description_updated_event.py | 1,021 | Python |
#!/usr/bin/env python
import jinja2
import os
import re
import shlex
import sys
import mkdocs.build
from mkdocs.build import build
from mkdocs.config import load_config
from urllib2 import urlopen
import subprocess
def line_containing(lines, text):
for i in range(len(lines)):
if text.lower() in lines[i].lower():
return i
raise Exception("could not find {}".format(text))
# Wrap some functions to allow custom commands in markdown
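# The expand() helper defined below recognises four double-bracket directives inside
# the markdown source (placeholder argument names here are illustrative, not from the
# original docs):
#   [[.import <url-or-path>]]       -- inline external markdown from a URL or file
#   [[.run <command> [args...]]]    -- run a shell command and embed "$ cmd" plus its output
#   [[.code <path> [start] [end]]]  -- embed a fenced Go snippet; start/end may be line
#                                      numbers, marker text, or a single region name
#   [[.doc <path> [start] [end]]]   -- same extraction as .code, but without the fence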
convert_markdown_original = mkdocs.build.convert_markdown
def convert_markdown_new(source, **kwargs):
def expand(match):
args = shlex.split(match.groups()[0])
# Import external markdown
if args[0] == ".import":
code = ""
try: #Try as a URL
code = urlopen(args[1]).read()
except ValueError: # invalid URL, try as a file
code = open(args[1]).read()
return code
# Run a shell command
elif args[0] == ".run":
result = ""
command = "$ " + match.groups()[0].replace(".run", "").strip()
try:
result = subprocess.check_output(args[1:], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e:
result = e.output
return "```\n" + command + "\n" + result.strip() + "\n```"
# Source code embeds
elif args[0] == ".code" or args[0] == ".doc":
code = ""
try: #Try as a URL
code = urlopen(args[1]).read()
except ValueError: # invalid URL, try as a file
code = open("../" + args[1]).read()
lines = code.splitlines()
# Short hand for specifying a region
if len(args) == 3:
region = args[2]
args[2] = "START " + region
args.append("END " + region)
if len(args) == 4:
start = 1
end = len(lines) - 1
try:
if args[2].isdigit(): start = int(args[2])
else:
start = line_containing(lines, args[2]) + 1
if args[3].isdigit(): end = int(args[3])
else: end = line_containing(lines, args[3]) + 1
except Exception, e: # If line_containing fails
print "Error: {}".format(e)
print " in {}".format(args[1])
sys.exit(1)
#TODO: Also allow regex matching
lines = lines[start - 1:end]
# Trim "OMIT" lines. Ignore "*/".
lines = filter(lambda x: not x.strip().rstrip("*/").rstrip().lower().endswith("omit"), lines)
# TODO: Trim leading and trailing empty lines
if args[0] == ".code":
lines.insert(0, "```go")
lines.append("```")
# else: # args[0] == ".doc"
# lines.insert(0, "\n")
# lines.insert("\n")
return "\n".join(lines)
# No matching logic
else:
return match.group(0)
    # Process an arbitrary number of expansions.
oldSource = ""
while source != oldSource:
oldSource = source
source = re.sub("\[\[(.*)\]\]", expand, oldSource)
return convert_markdown_original(source)
# Hotpatch in the markdown conversion wrapper
mkdocs.build.convert_markdown = convert_markdown_new
if __name__ == "__main__":
# Build documentation
config = load_config(options=None)
build(config)
# Load templates
template_env = jinja2.Environment(loader = jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'theme')))
index_template = template_env.get_template('home.html')
community_template = template_env.get_template('community.html')
# Home page
with open('site/index.html', 'w') as f:
f.write(index_template.render(
page="home"
))
# Community page
with open('site/community.html', 'w') as f:
f.write(community_template.render(
page="community"
))
| 27.133333 | 119 | 0.60415 | [
"BSD-3-Clause"
] | fossabot/srclib | docs/buildsite.py | 3,663 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of moses. Its use is licensed under the GNU Lesser General
# Public License version 2.1 or, at your option, any later version.
from __future__ import print_function, unicode_literals
import logging
import argparse
import subprocess
import sys
import os
import codecs
# ../bilingual-lm
sys.path.append(os.path.join(os.path.dirname(sys.path[0]), 'bilingual-lm'))
import train_nplm
import extract_vocab
import extract_syntactic_ngrams
logging.basicConfig(
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument(
"--working-dir", dest="working_dir", metavar="PATH")
parser.add_argument(
"--corpus", '-text', dest="corpus_stem", metavar="PATH", help="Input file.")
parser.add_argument(
"--nplm-home", dest="nplm_home", metavar="PATH", required=True,
help="Location of NPLM.")
parser.add_argument(
"--epochs", dest="epochs", type=int, metavar="INT",
help="Number of training epochs (default: %(default)s).")
parser.add_argument(
"--up-context-size", dest="up_context_size", type=int, metavar="INT",
help="Size of ancestor context (default: %(default)s).")
parser.add_argument(
"--left-context-size", dest="left_context_size", type=int, metavar="INT",
help="Size of sibling context (left) (default: %(default)s).")
parser.add_argument(
"--right-context-size", dest="right_context_size", type=int,
metavar="INT",
help="Size of sibling context (right) (default: %(default)s).")
parser.add_argument(
"--mode", dest="mode", choices=['head', 'label'], required=True,
help="Type of RDLM to train (both are required for decoding).")
parser.add_argument(
"--minibatch-size", dest="minibatch_size", type=int, metavar="INT",
help="Minibatch size (default: %(default)s).")
parser.add_argument(
"--noise", dest="noise", type=int, metavar="INT",
help="Number of noise samples for NCE (default: %(default)s).")
parser.add_argument(
"--hidden", dest="hidden", type=int, metavar="INT",
help=(
"Size of hidden layer (0 for single hidden layer) "
"(default: %(default)s)"))
parser.add_argument(
"--input-embedding", dest="input_embedding", type=int, metavar="INT",
help="Size of input embedding layer (default: %(default)s).")
parser.add_argument(
"--output-embedding", dest="output_embedding", type=int, metavar="INT",
help="Size of output embedding layer (default: %(default)s).")
parser.add_argument(
"--threads", "-t", dest="threads", type=int, metavar="INT",
help="Number of threads (default: %(default)s).")
parser.add_argument(
"--output-model", dest="output_model", metavar="PATH",
help="Name of output model (default: %(default)s).")
parser.add_argument(
"--output-dir", dest="output_dir", metavar="PATH",
help="Output directory (default: same as working-dir).")
parser.add_argument(
"--config-options-file", dest="config_options_file", metavar="PATH")
parser.add_argument(
"--log-file", dest="log_file", metavar="PATH",
help="Log file to write to (default: %(default)s).")
parser.add_argument(
"--validation-corpus", dest="validation_corpus", metavar="PATH",
help="Validation file (default: %(default)s).")
parser.add_argument(
"--activation-function", dest="activation_fn",
choices=['identity', 'rectifier', 'tanh', 'hardtanh'],
help="Activation function (default: %(default)s).")
parser.add_argument(
"--learning-rate", dest="learning_rate", type=float, metavar="FLOAT",
help="Learning rate (default: %(default)s).")
parser.add_argument(
"--input-words-file", dest="input_words_file", metavar="PATH",
help="Input vocabulary (default: %(default)s).")
parser.add_argument(
"--output-words-file", dest="output_words_file", metavar="PATH",
help="Output vocabulary (default: %(default)s).")
parser.add_argument(
"--input-vocab-size", dest="input_vocab_size", type=int, metavar="INT",
help="Input vocabulary size (default: %(default)s).")
parser.add_argument(
"--output-vocab-size", dest="output_vocab_size", type=int, metavar="INT",
help="Output vocabulary size (default: %(default)s).")
parser.add_argument(
"--mmap", dest="mmap", action="store_true",
help="Use memory-mapped file (for lower memory consumption).")
parser.add_argument(
"--train-host", dest="train_host",
help="Execute nplm training on this host, via ssh")
parser.add_argument("--extra-settings", dest="extra_settings",
help="Extra settings to be passed to NPLM")
parser.set_defaults(
working_dir="working",
corpus_stem="train",
nplm_home="/home/bhaddow/tools/nplm",
epochs=2,
up_context_size=2,
left_context_size=3,
right_context_size=0,
minibatch_size=1000,
noise=100,
hidden=0,
mode='head',
input_embedding=150,
output_embedding=750,
threads=4,
output_model="train",
output_dir=None,
config_options_file="config",
log_file="log",
validation_corpus=None,
activation_fn="rectifier",
learning_rate=1,
input_words_file=None,
output_words_file=None,
input_vocab_size=500000,
output_vocab_size=500000)
def prepare_vocabulary(options):
vocab_prefix = os.path.join(options.working_dir, 'vocab')
extract_vocab_options = extract_vocab.create_parser().parse_args(
['--input', options.corpus_stem, '--output', vocab_prefix])
extract_vocab.main(extract_vocab_options)
if options.input_words_file is None:
options.input_words_file = vocab_prefix + '.input'
orig = vocab_prefix + '.all'
filtered_vocab = open(orig).readlines()
if options.input_vocab_size:
filtered_vocab = filtered_vocab[:options.input_vocab_size]
open(options.input_words_file, 'w').writelines(filtered_vocab)
if options.output_words_file is None:
options.output_words_file = vocab_prefix + '.output'
if options.mode == 'label':
blacklist = [
'<null',
'<root',
'<start_head',
'<dummy',
'<head_head',
'<stop_head',
]
orig = vocab_prefix + '.special'
filtered_vocab = open(orig).readlines()
orig = vocab_prefix + '.nonterminals'
filtered_vocab += open(orig).readlines()
filtered_vocab = [
word
for word in filtered_vocab
if not any(word.startswith(prefix) for prefix in blacklist)]
if options.output_vocab_size:
filtered_vocab = filtered_vocab[:options.output_vocab_size]
else:
orig = vocab_prefix + '.all'
filtered_vocab = open(orig).readlines()[:options.output_vocab_size]
open(options.output_words_file, 'w').writelines(filtered_vocab)
def main(options):
if options.output_dir is None:
options.output_dir = options.working_dir
else:
# Create output dir if necessary
if not os.path.exists(options.output_dir):
os.makedirs(options.output_dir)
options.ngram_size = (
2 * options.up_context_size +
2 * options.left_context_size +
2 * options.right_context_size
)
if options.mode == 'head':
options.ngram_size += 2
elif options.mode == 'label':
options.ngram_size += 1
if options.input_words_file is None or options.output_words_file is None:
sys.stderr.write(
"Either input vocabulary or output vocabulary not specified: "
"extracting vocabulary from training text.\n")
prepare_vocabulary(options)
numberized_file = os.path.basename(options.corpus_stem) + '.numberized'
train_file = numberized_file
if options.mmap:
train_file += '.mmap'
extract_options = extract_syntactic_ngrams.create_parser().parse_args([
'--input', options.corpus_stem,
'--output', os.path.join(options.working_dir, numberized_file),
'--vocab', options.input_words_file,
'--output_vocab', options.output_words_file,
'--right_context', str(options.right_context_size),
'--left_context', str(options.left_context_size),
'--up_context', str(options.up_context_size),
'--mode', options.mode
])
sys.stderr.write('extracting syntactic n-grams\n')
extract_syntactic_ngrams.main(extract_options)
if options.validation_corpus:
extract_options.input = open(options.validation_corpus)
options.validation_file = os.path.join(
options.working_dir, os.path.basename(options.validation_corpus))
extract_options.output = open(
options.validation_file + '.numberized', 'w')
sys.stderr.write('extracting syntactic n-grams (validation file)\n')
extract_syntactic_ngrams.main(extract_options)
extract_options.output.close()
else:
options.validation_file = None
if options.mmap:
try:
os.remove(os.path.join(options.working_dir, train_file))
except OSError:
pass
mmap_cmd = [os.path.join(options.nplm_home, 'src', 'createMmap'),
'--input_file',
os.path.join(options.working_dir, numberized_file),
'--output_file',
os.path.join(options.working_dir, train_file)
]
sys.stderr.write('creating memory-mapped file\n')
sys.stderr.write('executing: ' + ', '.join(mmap_cmd) + '\n')
ret = subprocess.call(mmap_cmd)
if ret:
raise Exception("creating memory-mapped file failed")
sys.stderr.write('training neural network\n')
train_nplm.main(options)
sys.stderr.write('averaging null words\n')
ret = subprocess.call([
os.path.join(sys.path[0], 'average_null_embedding.py'),
options.nplm_home,
os.path.join(
options.output_dir,
options.output_model + '.model.nplm.' + str(options.epochs)),
os.path.join(
options.working_dir,
numberized_file),
os.path.join(options.output_dir, options.output_model + '.model.nplm')
])
if ret:
raise Exception("averaging null words failed")
if __name__ == "__main__":
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
options = parser.parse_known_args()[0]
if parser.parse_known_args()[1]:
sys.stderr.write('Warning: unknown arguments: {0}\n'.format(parser.parse_known_args()[1]))
main(options)
| 38.386525 | 98 | 0.65358 | [
"MIT"
] | Feecely/fairseq_bertnmt | examples/mosesdecoder-master/scripts/training/rdlm/train_rdlm.py | 10,825 | Python |
# qubit number=3
# total number=14
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.cx(input_qubit[3],input_qubit[0]) # number=11
prog.z(input_qubit[3]) # number=12
prog.cx(input_qubit[3],input_qubit[0]) # number=13
prog.z(input_qubit[1]) # number=8
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[3],input_qubit[0]) # number=5
prog.swap(input_qubit[3],input_qubit[0]) # number=6
prog.x(input_qubit[3]) # number=9
prog.x(input_qubit[3]) # number=10
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_noisy696.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.641304 | 118 | 0.634683 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | benchmark/startQiskit_noisy696.py | 2,543 | Python |
"""Provides the MenuItem class."""
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from attr import attrs
from ..action import ActionFunctionType
from ..mixins import RegisterEventMixin
if TYPE_CHECKING:
from ..types import TitleFunction
@attrs(auto_attribs=True)
class MenuItem(RegisterEventMixin):
"""An item in a :class:`~earwax.menu.Menu`.
    This class is rarely used directly; instead,
:meth:`earwax.menu.Menu.add_item` or :meth:`earwax.menu.Menu.item` can be
used to return an instance.
:ivar ~earwax.MenuItem.func: The function which will be called when this
item is activated.
:ivar ~earwax.MenuItem.title: The title of this menu item.
If this value is a callable, it should return a string which will be
used as the title.
:ivar ~earwax.MenuItem.select_sound_path: The path to a sound which should
play when this menu item is selected.
If this value is ``None`` (the default), then no sound will be heard
unless the containing menu has its
:attr:`~earwax.Menu.item_select_sound_path` attribute set to something
that is not ``None``, or
:attr:`earwax.EarwaxConfig.menus.default_item_select_sound` is not
``None``.
:ivar ~earwax.MenuItem.activate_sound_path: The path to a sound which
should play when this menu item is activated.
If this value is ``None`` (the default), then no sound will be heard
unless the containing menu has its
:attr:`~earwax.Menu.item_activate_sound_path` attribute set to
something that is not ``None``, or
:attr:`earwax.EarwaxConfig.menus.default_item_select_sound` is not
``None``.
"""
func: ActionFunctionType
title: Optional[Union[str, "TitleFunction"]] = None
select_sound_path: Optional[Path] = None
loop_select_sound: bool = False
activate_sound_path: Optional[Path] = None
def __attrs_post_init__(self) -> None:
"""Register events."""
self.register_event(self.on_selected)
def get_title(self) -> Optional[str]:
"""Return the proper title of this object.
If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable,
its return value will be returned.
"""
if callable(self.title):
return self.title()
return self.title
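    # Illustrative example (the names below are hypothetical, not part of earwax):
    #   item = MenuItem(func=do_play, title=lambda: f'Track: {current_track}')
    #   item.get_title()  # re-evaluates the lambda, so the label stays current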
def on_selected(self) -> None:
"""Handle this menu item being selected."""
pass
| 33.2 | 78 | 0.677108 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | chrisnorman7/earwax | earwax/menus/menu_item.py | 2,490 | Python |
import numpy as np
import matplotlib.pyplot as plt
def subf(n):
if n <= 1:
return 0
elif n == 2:
return 1
return (n - 1) * (subf(n - 1) + subf(n - 2))
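# subf computes the subfactorial !n (derangement count) via !n = (n-1) * (!(n-1) + !(n-2));
# for n = 1..4 it yields 0, 1, 2, 9.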
x = np.arange(1, 5, 1)
y = np.vectorize(subf)(x)
plt.plot(x, y)
plt.show()
| 11.045455 | 45 | 0.572016 | [
"Apache-2.0"
] | MarcusTL12/School | 18Host/TMA4120/latex/PyplotTesting/Scripts/subfactorial.py | 243 | Python |
# -*- coding: utf-8 -*-
from django.urls import path
app_name = 'login'
urlpatterns = []
| 15.833333 | 29 | 0.610526 | [
"MIT"
] | AlcindoSchleder/dashboard | apps/login/urls.py | 95 | Python |
from datetime import datetime
import numpy as np
import re
from bamboo.core.frame import RESERVED_KEYS
from bamboo.core.parser import Parser
from bamboo.lib.exceptions import ArgumentError
from bamboo.lib.mongo import reserve_encoded
CARDINALITY = 'cardinality'
OLAP_TYPE = 'olap_type'
SIMPLETYPE = 'simpletype'
LABEL = 'label'
# olap_types
DIMENSION = 'dimension'
MEASURE = 'measure'
# simpletypes
BOOLEAN = 'boolean'
DATETIME = 'datetime'
INTEGER = 'integer'
FLOAT = 'float'
STRING = 'string'
# map from numpy objects to olap_types
DTYPE_TO_OLAP_TYPE = {
np.object_: DIMENSION,
np.bool_: DIMENSION,
np.float64: MEASURE,
np.int64: MEASURE,
datetime: MEASURE,
}
# map from numpy objects to simpletypes
DTYPE_TO_SIMPLETYPE = {
np.bool_: BOOLEAN,
np.float64: FLOAT,
np.int64: INTEGER,
np.object_: STRING,
datetime: DATETIME,
}
SIMPLETYPE_TO_DTYPE = {
FLOAT: np.float64,
INTEGER: np.int64,
}
SIMPLETYPE_TO_OLAP_TYPE = {
v: DTYPE_TO_OLAP_TYPE[k] for (k, v) in DTYPE_TO_SIMPLETYPE.items()}
RE_ENCODED_COLUMN = re.compile(ur'(?u)\W')
class Schema(dict):
@classmethod
def safe_init(cls, arg):
"""Make schema with potential arg of None."""
return cls() if arg is None else cls(arg)
@property
def labels_to_slugs(self):
"""Build dict from column labels to slugs."""
return {
column_attrs[LABEL]: reserve_encoded(column_name) for
(column_name, column_attrs) in self.items()
}
@property
def numerics(self):
return [slug for slug, col_schema in self.items()
if col_schema[SIMPLETYPE] in [INTEGER, FLOAT]]
@property
def numerics_select(self):
return {col: 1 for col in self.numerics}
def cardinality(self, column):
if self.is_dimension(column):
return self[column].get(CARDINALITY)
def convert_type(self, slug, value):
column_schema = self.get(slug)
if column_schema:
type_func = SIMPLETYPE_TO_DTYPE.get(column_schema[SIMPLETYPE])
if type_func:
value = type_func(value)
return value
def datetimes(self, intersect_with):
return [slug for slug, col in self.items()
if col[SIMPLETYPE] == DATETIME and slug in intersect_with]
def is_date_simpletype(self, column):
return self[column][SIMPLETYPE] == DATETIME
def is_dimension(self, column):
col_schema = self.get(column)
return col_schema and col_schema[OLAP_TYPE] == DIMENSION
def rebuild(self, dframe, overwrite=False):
"""Rebuild a schema for a dframe.
:param dframe: The DataFrame whose schema to merge with the current
schema.
:param overwrite: If true replace schema, otherwise update.
"""
current_schema = self
new_schema = schema_from_dframe(dframe, self)
if current_schema and not overwrite:
# merge new schema with existing schema
current_schema.update(new_schema)
new_schema = current_schema
return new_schema
def rename_map_for_dframe(self, dframe):
"""Return a map from dframe columns to slugs.
:param dframe: The DataFrame to produce the map for.
"""
labels_to_slugs = self.labels_to_slugs
return {
column: labels_to_slugs[column] for column in
dframe.columns.tolist() if self._resluggable_column(
column, labels_to_slugs, dframe)
}
def set_olap_type(self, column, olap_type):
"""Set the OLAP Type for this `column` of schema.
Only columns with an original OLAP Type of 'measure' can be modified.
This includes columns with Simple Type integer, float, and datetime.
:param column: The column to set the OLAP Type for.
:param olap_type: The OLAP Type to set. Must be 'dimension' or
'measure'.
:raises: `ArgumentError` if trying to set the OLAP Type of an column
whose OLAP Type was not originally a 'measure'.
"""
self[column][OLAP_TYPE] = olap_type
def _resluggable_column(self, column, labels_to_slugs, dframe):
"""Test if column should be slugged.
A column should be slugged if:
1. The `column` is a key in `labels_to_slugs` and
2. The `column` is not a value in `labels_to_slugs` or
1. The `column` label is not equal to the `column` slug and
2. The slug is not in the `dframe`'s columns
:param column: The column to reslug.
:param labels_to_slugs: The labels to slugs map (only build once).
:param dframe: The DataFrame that column is in.
"""
return (column in labels_to_slugs.keys() and (
not column in labels_to_slugs.values() or (
labels_to_slugs[column] != column and
labels_to_slugs[column] not in dframe.columns)))
def schema_from_dframe(dframe, schema=None):
"""Build schema from the DataFrame and a schema.
:param dframe: The DataFrame to build a schema for.
:param schema: Existing schema, optional.
:returns: A dictionary schema.
"""
dtypes = dframe.dtypes.to_dict()
column_names = list()
names_to_labels = dict()
# use existing labels for existing columns
for name in dtypes.keys():
if name not in RESERVED_KEYS:
column_names.append(name)
if schema:
schema_for_name = schema.get(name)
if schema_for_name:
names_to_labels[name] = schema_for_name[
LABEL]
encoded_names = dict(zip(column_names, _slugify_columns(column_names)))
schema = Schema()
for (name, dtype) in dtypes.items():
if name not in RESERVED_KEYS:
column_schema = {
LABEL: names_to_labels.get(name, name),
OLAP_TYPE: _olap_type_for_data_and_dtype(
dframe[name], dtype),
SIMPLETYPE: _simpletype_for_data_and_dtype(
dframe[name], dtype),
}
try:
column_schema[CARDINALITY] = dframe[
name].nunique()
except AttributeError:
pass
except TypeError:
# E.g. dates with and without offset can not be compared and
# raise a type error.
pass
schema[encoded_names[name]] = column_schema
return schema
def _slugify_columns(column_names):
"""Convert list of strings into unique slugs.
Convert non-alphanumeric characters in column names into underscores and
ensure that all column names are unique.
:param column_names: A list of strings.
:returns: A list of slugified names with a one-to-one mapping to
`column_names`.
"""
encoded_names = []
for column_name in column_names:
slug = RE_ENCODED_COLUMN.sub('_', column_name).lower()
slug = make_unique(slug, encoded_names + Parser.reserved_words)
encoded_names.append(slug)
return encoded_names
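# Illustrative trace (assuming neither slug appears in Parser.reserved_words):
#   _slugify_columns(['Amount ($)', 'amount ($)'])  ->  ['amount____', 'amount_____']
# Both labels collapse to the same slug, so make_unique() below appends '_' to the second.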
def make_unique(name, reserved_names):
"""Return a slug ensuring name is not in `reserved_names`.
:param name: The name to make unique.
:param reserved_names: A list of names the column must not be included in.
"""
while name in reserved_names:
name += '_'
return name
def filter_schema(schema):
"""Remove not settable columns."""
for column, column_schema in schema.iteritems():
if column_schema.get(CARDINALITY):
del column_schema[CARDINALITY]
schema[column] = column_schema
return schema
def _olap_type_for_data_and_dtype(column, dtype):
return _type_for_data_and_dtypes(
DTYPE_TO_OLAP_TYPE, column, dtype.type)
def _simpletype_for_data_and_dtype(column, dtype):
return _type_for_data_and_dtypes(
DTYPE_TO_SIMPLETYPE, column, dtype.type)
def _type_for_data_and_dtypes(type_map, column, dtype_type):
has_datetime = any([isinstance(field, datetime) for field in column])
return type_map[datetime if has_datetime else dtype_type]
| 30.149635 | 78 | 0.641932 | [
"BSD-3-Clause"
] | SEL-Columbia/bamboo | bamboo/lib/schema_builder.py | 8,261 | Python |
import numpy as np
import theano.tensor as tt
from pymc3.util import get_variable_name
from ..math import logsumexp
from .dist_math import bound
from .distribution import Discrete, Distribution, draw_values, generate_samples
from .continuous import get_tau_sd, Normal
def all_discrete(comp_dists):
"""
Determine if all distributions in comp_dists are discrete
"""
if isinstance(comp_dists, Distribution):
return isinstance(comp_dists, Discrete)
else:
return all(isinstance(comp_dist, Discrete) for comp_dist in comp_dists)
class Mixture(Distribution):
R"""
Mixture log-likelihood
Often used to model subpopulation heterogeneity
.. math:: f(x \mid w, \theta) = \sum_{i = 1}^n w_i f_i(x \mid \theta_i)
======== ============================================
Support :math:`\cap_{i = 1}^n \textrm{support}(f_i)`
Mean :math:`\sum_{i = 1}^n w_i \mu_i`
======== ============================================
Parameters
----------
w : array of floats
w >= 0 and w <= 1
the mixture weights
comp_dists : multidimensional PyMC3 distribution (e.g. `pm.Poisson.dist(...)`)
or iterable of one-dimensional PyMC3 distributions the
component distributions :math:`f_1, \ldots, f_n`
Example
-------
.. code-block:: python
# 2-Mixture Poisson distribution
with pm.Model() as model:
lam = pm.Exponential('lam', lam=1, shape=(2,)) # `shape=(2,)` indicates two mixtures.
# As we just need the logp, rather than add a RV to the model, we need to call .dist()
components = pm.Poisson.dist(mu=lam, shape=(2,))
w = pm.Dirichlet('w', a=np.array([1, 1])) # two mixture component weights.
like = pm.Mixture('like', w=w, comp_dists=components, observed=data)
# 2-Mixture Poisson using iterable of distributions.
with pm.Model() as model:
lam1 = pm.Exponential('lam1', lam=1)
lam2 = pm.Exponential('lam2', lam=1)
pois1 = pm.Poisson.dist(mu=lam1)
pois2 = pm.Poisson.dist(mu=lam2)
w = pm.Dirichlet('w', a=np.array([1, 1]))
like = pm.Mixture('like', w=w, comp_dists = [pois1, pois2], observed=data)
"""
def __init__(self, w, comp_dists, *args, **kwargs):
shape = kwargs.pop('shape', ())
self.w = w = tt.as_tensor_variable(w)
self.comp_dists = comp_dists
defaults = kwargs.pop('defaults', [])
if all_discrete(comp_dists):
dtype = kwargs.pop('dtype', 'int64')
else:
dtype = kwargs.pop('dtype', 'float64')
try:
self.mean = (w * self._comp_means()).sum(axis=-1)
if 'mean' not in defaults:
defaults.append('mean')
except AttributeError:
pass
try:
comp_modes = self._comp_modes()
comp_mode_logps = self.logp(comp_modes)
self.mode = comp_modes[tt.argmax(w * comp_mode_logps, axis=-1)]
if 'mode' not in defaults:
defaults.append('mode')
except AttributeError:
pass
super(Mixture, self).__init__(shape, dtype, defaults=defaults,
*args, **kwargs)
def _comp_logp(self, value):
comp_dists = self.comp_dists
try:
value_ = value if value.ndim > 1 else tt.shape_padright(value)
return comp_dists.logp(value_)
except AttributeError:
return tt.stack([comp_dist.logp(value) for comp_dist in comp_dists],
axis=1)
def _comp_means(self):
try:
return tt.as_tensor_variable(self.comp_dists.mean)
except AttributeError:
return tt.stack([comp_dist.mean for comp_dist in self.comp_dists],
axis=1)
def _comp_modes(self):
try:
return tt.as_tensor_variable(self.comp_dists.mode)
except AttributeError:
return tt.stack([comp_dist.mode for comp_dist in self.comp_dists],
axis=1)
def _comp_samples(self, point=None, size=None, repeat=None):
try:
samples = self.comp_dists.random(point=point, size=size, repeat=repeat)
except AttributeError:
samples = np.column_stack([comp_dist.random(point=point, size=size, repeat=repeat)
for comp_dist in self.comp_dists])
return np.squeeze(samples)
def logp(self, value):
w = self.w
return bound(logsumexp(tt.log(w) + self._comp_logp(value), axis=-1).sum(),
w >= 0, w <= 1, tt.allclose(w.sum(axis=-1), 1),
broadcast_conditions=False)
def random(self, point=None, size=None, repeat=None):
def random_choice(*args, **kwargs):
w = kwargs.pop('w')
w /= w.sum(axis=-1, keepdims=True)
k = w.shape[-1]
if w.ndim > 1:
return np.row_stack([np.random.choice(k, p=w_) for w_ in w])
else:
return np.random.choice(k, p=w, *args, **kwargs)
w = draw_values([self.w], point=point)[0]
w_samples = generate_samples(random_choice,
w=w,
broadcast_shape=w.shape[:-1] or (1,),
dist_shape=self.shape,
size=size).squeeze()
comp_samples = self._comp_samples(point=point, size=size, repeat=repeat)
if comp_samples.ndim > 1:
return np.squeeze(comp_samples[np.arange(w_samples.size), w_samples])
else:
return np.squeeze(comp_samples[w_samples])
class NormalMixture(Mixture):
R"""
Normal mixture log-likelihood
.. math::
f(x \mid w, \mu, \sigma^2) = \sum_{i = 1}^n w_i N(x \mid \mu_i, \sigma^2_i)
======== =======================================
Support :math:`x \in \mathbb{R}`
Mean :math:`\sum_{i = 1}^n w_i \mu_i`
Variance :math:`\sum_{i = 1}^n w_i^2 \sigma^2_i`
======== =======================================
Parameters
----------
w : array of floats
w >= 0 and w <= 1
the mixture weights
mu : array of floats
the component means
sd : array of floats
the component standard deviations
tau : array of floats
the component precisions
Note: You only have to pass in sd or tau, but not both.
"""
def __init__(self, w, mu, *args, **kwargs):
_, sd = get_tau_sd(tau=kwargs.pop('tau', None),
sd=kwargs.pop('sd', None))
distshape = np.broadcast(mu, sd).shape
self.mu = mu = tt.as_tensor_variable(mu)
self.sd = sd = tt.as_tensor_variable(sd)
if not distshape:
distshape = np.broadcast(mu.tag.test_value, sd.tag.test_value).shape
super(NormalMixture, self).__init__(w, Normal.dist(mu, sd=sd, shape=distshape),
*args, **kwargs)
def _repr_latex_(self, name=None, dist=None):
if dist is None:
dist = self
mu = dist.mu
w = dist.w
sd = dist.sd
name = r'\text{%s}' % name
return r'${} \sim \text{{NormalMixture}}(\mathit{{w}}={},~\mathit{{mu}}={},~\mathit{{sigma}}={})$'.format(name,
get_variable_name(w),
get_variable_name(mu),
get_variable_name(sd))
| 34.486607 | 119 | 0.535016 | [
"Apache-2.0"
] | himkt/pymc3 | pymc3/distributions/mixture.py | 7,725 | Python |
'''
Class containing a Massey-style model and rankings of a season.
todo: documentation
todo: type hints
todo: inherit from Model?
'''
from __future__ import annotations  # defer annotation evaluation so List[Team], Bracket and Massey resolve lazily
from typing import List
# NOTE: Team and Bracket are assumed to be defined in this project's other modules;
# their exact import paths are not shown here, so importing them is left as a TODO.
class Massey:
def __init__(self):
'''
todo: this. what fields does it need?
'''
pass
def rank(self) -> List[Team]:
'''
Given a matrix, create a power ranking of the teams
'''
pass
def predict_bracket(self) -> Bracket:
'''
Given a ranking of the teams, and the draw for the bracket, predict who wins and stuff
'''
pass
@staticmethod
def from_file(filename: str) -> Massey:
'''
todo: docs
todo: weighting param?
parse teams and games from file
create matrix from teams and games
'''
pass | 21.675676 | 94 | 0.562344 | [
"Apache-2.0"
] | alhart2015/march-madness | src/models/Massey.py | 802 | Python |
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# directory.py - parse directory information
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 Thomas Schueppel, Dirk Meyer
#
# First Edition: Dirk Meyer <[email protected]>
# Maintainer: Dirk Meyer <[email protected]>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = ['Parser']
# python imports
import os
import logging
# kaa imports
import kaa
# kaa.metadata imports
import kaa.metadata.core as core
from kaa.metadata.image.core import BinsParser
# get logging object
log = logging.getLogger('metadata')
class Directory(core.Media):
"""
Simple parser for reading a .directory file.
"""
media = core.MEDIA_DIRECTORY
def __init__(self, directory):
core.Media.__init__(self)
# search .directory
info = os.path.join(directory, '.directory')
if os.path.isfile(info):
f = open(info)
for l in f.readlines():
if l.startswith('Icon='):
image = l[5:].strip()
if not image.startswith('/'):
image = os.path.join(directory, image)
if os.path.isfile(image):
self._set('image', image)
if l.startswith('Name='):
self.title = l[5:].strip()
if l.startswith('Comment='):
self.comment = l[8:].strip()
f.close()
# search album.xml (bins)
binsxml = os.path.join(directory, 'album.xml')
if os.path.isfile(binsxml):
bins = BinsParser(binsxml)
for key, value in list(bins.items()):
if key == 'sampleimage':
image = os.path.join(directory, kaa.unicode_to_str(value))
if os.path.isfile(image):
self._set('image', image)
continue
self._set(key, value)
# find folder.jpg (windows style cover)
folderjpg = os.path.join(directory, 'folder.jpg')
if os.path.isfile(folderjpg):
self._set('image', folderjpg)
self.mime = 'text/directory'
Parser = Directory
| 34.425532 | 79 | 0.549444 | [
"MIT"
] | jtackaberry/stagehand | external/metadata/misc/directory.py | 3,236 | Python |
from Transform.Transform import *
qhost = '10.0.0.10'
qport = 5100
bucket_name = 's3a://insighttmpbucket1/'
index_name = bucket_name + 'index.txt'
tickers = get_stock_list(index_name)
q_con, flint_con, spark_con = connect(qhost, qport)
#push_raw_table(q_con, spark_con, flint_con, bucket_name, tickers)
push_returns(q_con, spark_con, flint_con, bucket_name, tickers)
| 28.461538 | 66 | 0.775676 | [
"MIT"
] | adityagc/Epoch | src/main.py | 370 | Python |
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by David J Turner ([email protected]) 21/01/2021, 11:45. Copyright (c) David J Turner
from .fit import single_temp_apec, power_law, single_temp_apec_profile
from .run import execute_cmd, xspec_call
| 37.555556 | 110 | 0.763314 | [
"BSD-3-Clause"
] | DavidT3/XGA | xga/xspec/__init__.py | 338 | Python |
for i in range(1,int(input())+1): #More than 2 lines will result in 0 score. Do not leave a blank line also
print((pow(int(pow(10, i)//9), 2)))
| 20 | 108 | 0.6 | [
"MIT"
] | abivilion/Hackerank-Solutions- | Python/Math/triangle_quest_2.py | 160 | Python |
"""trefechanwen URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from . import views as views
from rest_framework import routers
from bookings import views as bookings_views
router = routers.DefaultRouter()
router.register(r'availabilitydates', bookings_views.AvailabilityDateViewSet, base_name='AvailabilityDates')
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^cottage$', views.cottage, name='cottage'),
url(r'^barn$', views.barn, name='barn'),
url(r'^availability$', views.availability, name='availability'),
url(r'^localinfo$', views.localinfo, name='localinfo'),
url(r'^location$', views.location, name='location'),
url(r'^walking$', views.walking, name='walking'),
url(r'^beaches$', views.beaches, name='beaches'),
url(r'^wildlife', views.wildlife, name='wildlife'),
url(r'^contact$', views.contact, name='contact'),
url(r'^covid$', views.covid, name='covid'),
url(r'^admin/', admin.site.urls),
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| 41.547619 | 108 | 0.697994 | [
"MIT"
] | HedgehogProductions/trefechanwen | trefechanwen/urls.py | 1,745 | Python |
#!/usr/bin/env python
# coding: utf-8
# # Exploring JHU COVID Case, Death, and Vaccine Information
# This notebook takes the live, updated data from JHU CSSE and GovEx, formats and simplifies it for my purposes, and saves it in csv files in the same directory. The two data sources use slightly different conventions and provide data for slightly different locations, so I standardized column names and kept only those rows common to both datasets. It makes most sense for this to be run once, so that the same data is used every time. In the future, it could be worthwhile to make the processes in this project run on 'live' data, but not for the purposes of this project at this time.
#
# #### Data Sources
# * [Case Data - JHU CSSE](https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv)
# * [Vaccine Data - JHU GovEx](https://raw.githubusercontent.com/govex/COVID-19/master/data_tables/vaccine_data/global_data/time_series_covid19_vaccine_doses_admin_global.csv)
#
# #### Technical Sources
# * [Pandas Documentation](https://pandas.pydata.org/docs/)
# * [MatPlotLib.PyPlot Documentation](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html)
# * [Standardizing Dates with `datetime.datetime` - Stack Overflow](https://stackoverflow.com/questions/4709652/python-regex-to-match-dates)
# * [Getting Only Date in `datetime.datetime`](https://stackoverflow.com/questions/18039680/django-get-only-date-from-datetime-strptime)
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
import sys
# ## Case Info
# In[2]:
case_data = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
print(case_data.shape)
case_data.head()
#
# In[3]:
plt.scatter(case_data['3/23/20'], case_data['3/23/21'])
plt.xlim([0, 2000])
plt.ylim([0, 10000])
plt.title('Relations in COVID Case Count Over One Year in Different Countries')
plt.xlabel('Cases on 3/23/2020')
plt.ylabel('Cases on 3/23/2021')
plt.plot(range(2000))
# The above plot is pretty useless in terms of correlation since we know (logically) that total case numbers can only increase. However, it provides a good example of the extremity of difference in scale of typical case numbers (within the range plotted) between early 2020 and early 2021. I also used it just to make sure there wouldn't be any obvious weird things with the data.
#
# The below table indicates mean case count for each day listed. The drastic change is obvious.
# In[4]:
case_data.mean(numeric_only=True)
# ## Vaccine Info
# In[5]:
vaccine_data = pd.read_csv('https://raw.githubusercontent.com/govex/COVID-19/master/data_tables/vaccine_data/global_data/time_series_covid19_vaccine_doses_admin_global.csv')
print(vaccine_data.shape)
vaccine_data.head()
# ## Standardizing Case and Vaccine Info
# The first step is to standardize columns by deleting unnecessary ones and establishing common naming conventions between the two files to minimize mistakes when referring to them:
# In[6]:
# Rename geographic columns in vaccine data to standardize
rename_conventions = {'Province_State': 'Province/State', 'Country_Region': 'Country', 'Country/Region': 'Country'}
case_data.rename(columns=rename_conventions, inplace=True)
vaccine_data.rename(columns=rename_conventions, inplace=True)
# Standardize dates
import datetime
def date_fixer(old_date):
data_type = ''
is_date = False
if len(old_date) == 10 and old_date[4] == '-': # is of format YYYY-MM-DD
date = datetime.datetime.strptime(old_date,'%Y-%m-%d').date()
data_type = 'Vaccinations'
is_date = True
    elif len(old_date) >= 6 and (old_date[2] == '/' or old_date[1] == '/'): # is of format (M)M/(D)D/YY
date = datetime.datetime.strptime(old_date, '%m/%d/%y').date()
data_type = 'Cases'
is_date = True
return str('{}/{}/{} {}'.format(date.month, date.day, date.year, data_type)) if is_date else old_date + data_type
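# For example: date_fixer('2021-03-23') -> '3/23/2021 Vaccinations',
# date_fixer('3/23/20') -> '3/23/2020 Cases', and non-date column names such as
# 'Country' pass through unchanged.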
vaccine_data.rename(columns=date_fixer, inplace=True)
case_data.rename(columns=date_fixer, inplace=True)
# Next, I deleted the columns that weren't dates or Country/Region and State/Province. I may later want to use population, but not yet.
# In[7]:
case_data.drop(columns=['Lat', 'Long', 'Province/State'], inplace=True)
vaccine_data.drop(columns=['UID', 'iso2', 'iso3', 'code3', 'FIPS', 'Admin2', 'Lat', 'Long_', 'Combined_Key', 'Population', 'Province/State'], inplace=True)
# Next, I sorted the data, filled in null values with 0, combined rows from the same country, and merged the dataframes.
# In[8]:
case_data.sort_values(by='Country', inplace=True)
vaccine_data.sort_values(by='Country', inplace=True)
vaccine_data.fillna(0.0, inplace=True)
case_data.fillna(0, inplace=True)
case_data = case_data.groupby(['Country']).sum()
vaccine_data = vaccine_data.groupby(['Country']).sum()
case_data.to_csv('case-data.csv')
vaccine_data.to_csv('vaccine-data.csv')
full_data = pd.merge(case_data, vaccine_data, how='inner', on='Country')
print('case data size:', case_data.shape, 'vaccine data size:', vaccine_data.shape, 'full data size:', full_data.shape)
# The next step was to look at all the country names, so I can manually see if I want to get rid of any. I decided to keep them all, at least for now.
# In[9]:
pd.set_option('display.max_seq_items', None)
full_data.index
# Finally, I saved the data into a csv file which can be referenced later. The below cell should really be run once only, so that the same data is used each time. One way to update this project could be to reload the data automatically.
# In[10]:
full_data.to_csv('full-data.csv')
| 40.268966 | 587 | 0.7503 | [
"CC0-1.0"
] | emmmoore/vaccinesandcases | .scripts/dataprep.py | 5,839 | Python |
from typing import Tuple
import math
import torch
from torch.optim.optimizer import Optimizer
def linear_warmup_and_cosine_protocol(
f_values: Tuple[float, float, float],
x_milestones: Tuple[int, int, int, int]):
"""
There are 5 regions:
1. constant at f0 for x < x0
2. linear increase from f0 to f1 for x0 < x < x1
3. constant at f1 for x1 < x < x2
4. cosine protocol from f1 to f2 for x2 < x < x3
5. constant at f2 for x > x3
If you want a linear_ramp followed by a cosine_decay only simply set:
1. x0=0 (to eliminate the first constant piece)
2. x2=x1 (to eliminate the second constant piece)
3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay)
"""
assert x_milestones[0] <= x_milestones[1] <= x_milestones[2] <= x_milestones[3]
def fn(step):
if step <= x_milestones[0]:
return float(f_values[0])
elif (step > x_milestones[0]) and (step <= x_milestones[1]):
m = float(f_values[1] - f_values[0]) / float(max(1, x_milestones[1] - x_milestones[0]))
return float(f_values[0]) + m * float(step - x_milestones[0])
elif (step > x_milestones[1]) and (step <= x_milestones[2]):
return float(f_values[1])
elif (step > x_milestones[2]) and (step <= x_milestones[3]):
progress = float(step - x_milestones[2]) / float(max(1, x_milestones[3] - x_milestones[2])) # in (0,1)
tmp = 0.5 * (1.0 + math.cos(math.pi * progress)) # in (1,0)
return float(f_values[2]) + tmp * float(f_values[1] - f_values[2])
else:
return float(f_values[2])
return fn
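# Minimal usage sketch (the milestone and step values here are illustrative, not from
# the original module):
#   scale_fn = linear_warmup_and_cosine_protocol(
#       f_values=(0.0, 1.0, 0.1), x_milestones=(0, 100, 200, 300))
#   scale_fn(50)   # -> 0.5  (half-way through the linear warm-up)
#   scale_fn(400)  # -> 0.1  (past x3, final constant region)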
class LARS(Optimizer):
"""
Extends SGD in PyTorch with LARS scaling from the paper
'Large batch training of Convolutional Networks <https://arxiv.org/pdf/1708.03888.pdf>'_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
trust_coefficient (float, optional): trust coefficient for computing LR (default: 0.001)
eps (float, optional): eps for division denominator (default: 1e-8)
Example:
>>> model = torch.nn.Linear(10, 1)
>>> input = torch.Tensor(10)
>>> target = torch.Tensor([1.])
>>> loss_fn = lambda input, target: (input - target) ** 2
>>> #
>>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
Note:
The application of momentum in the SGD part is modified according to
the PyTorch standards. LARS scaling fits into the equation in the
following fashion.
.. math::
\begin{aligned}
g_{t+1} & = \text{lars_lr} * (\beta * p_{t} + g_{t+1}), \\
            v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}
        where :math:`p`, :math:`g`, :math:`v`, :math:`\mu` and :math:`\beta` denote the
parameters, gradient, velocity, momentum, and weight decay respectively.
The :math:`lars_lr` is defined by Eq. 6 in the paper.
The Nesterov version is analogously modified.
.. warning::
Parameters with weight decay set to 0 will automatically be excluded from
layer-wise LR scaling. This is to ensure consistency with papers like SimCLR
and BYOL.
"""
def __init__(
self,
params,
lr=None,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
trust_coefficient=0.001,
eps=1e-8,
):
if lr is None or lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
trust_coefficient=trust_coefficient,
eps=eps,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# exclude scaling for params with 0 weight decay
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad
p_norm = torch.norm(p.data)
g_norm = torch.norm(p.grad.data)
# lars scaling + weight decay part
if weight_decay != 0:
if p_norm != 0 and g_norm != 0:
lars_lr = p_norm / (g_norm + p_norm * weight_decay + group["eps"])
lars_lr *= group["trust_coefficient"]
d_p = d_p.add(p, alpha=weight_decay)
d_p *= lars_lr
# sgd part
if momentum != 0:
param_state = self.state[p]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.clone(d_p).detach()
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=-group["lr"])
return loss
| 38.127778 | 115 | 0.560105 | [
"Apache-2.0"
] | broadinstitute/tissue_purifier | src/tissue_purifier/models/_optim_scheduler.py | 6,863 | Python |
import uuid
from django.contrib.postgres.fields import ArrayField
from django.db import models
from api.common.models import TimestampableModel
from api.flags.enums import FlagLevels, FlagStatuses, FlagColours, FlagPermissions
from api.teams.models import Team
class FlagManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
class Flag(TimestampableModel):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(default="Untitled Flag", unique=True, max_length=100)
team = models.ForeignKey(Team, on_delete=models.CASCADE)
level = models.CharField(choices=FlagLevels.choices, max_length=20)
status = models.CharField(choices=FlagStatuses.choices, default=FlagStatuses.ACTIVE, max_length=20)
label = models.CharField(max_length=15, null=True, blank=True)
colour = models.CharField(choices=FlagColours.choices, default=FlagColours.DEFAULT, max_length=20)
priority = models.PositiveSmallIntegerField(default=0)
blocks_finalising = models.BooleanField(default=False)
removable_by = models.CharField(choices=FlagPermissions.choices, default=FlagPermissions.DEFAULT, max_length=50)
objects = FlagManager()
class Meta:
db_table = "flag"
ordering = ["team"]
def natural_key(self):
return (self.name,)
class FlaggingRuleManager(models.Manager):
def get_by_natural_key(
self,
team_name,
level,
status,
flag,
matching_values,
matching_groups,
excluded_values,
is_for_verified_goods_only,
):
return self.get(
team__name=team_name,
level=level,
status=status,
flag__name=flag,
matching_values=matching_values,
matching_groups=matching_groups,
excluded_values=excluded_values,
is_for_verified_goods_only=is_for_verified_goods_only,
)
class FlaggingRule(TimestampableModel):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
team = models.ForeignKey(Team, on_delete=models.CASCADE)
level = models.CharField(choices=FlagLevels.choices, max_length=20)
status = models.CharField(choices=FlagStatuses.choices, default=FlagStatuses.ACTIVE, max_length=20)
flag = models.ForeignKey(Flag, on_delete=models.CASCADE, related_name="flagging_rules")
matching_values = ArrayField(models.TextField(default=""), default=list)
matching_groups = ArrayField(models.TextField(default=""), default=list)
excluded_values = ArrayField(models.TextField(default=""), default=list)
is_for_verified_goods_only = models.BooleanField(null=True, blank=True)
objects = FlaggingRuleManager()
class Meta:
db_table = "flagging_rule"
indexes = [models.Index(fields=["created_at"])]
ordering = ["team__name", "-created_at"]
def natural_key(self):
return (
self.team.name,
self.level,
self.status,
self.flag.name,
self.matching_values,
self.matching_groups,
self.excluded_values,
self.is_for_verified_goods_only,
)
| 35.703297 | 116 | 0.698984 | [
"MIT"
] | uktrade/lite-ap | api/flags/models.py | 3,249 | Python |
from .Selenzy import (
readData,
updateScore,
analyse,
seqScore
)
from .Selenzy2 import (
analyse2
)
from .newtax import (
newtax
)
| 12 | 23 | 0.634615 | [
"MIT"
] | brsynth/selenzy-wrapper | selenzy_wrapper/selenzy/__init__.py | 156 | Python |
'''
Title : Shape and Reshape
Subdomain : Numpy
Domain : Python
Author : codeperfectplus
Created : 06 May 2020
'''
import numpy as np
arr = list(map(int,input().split()))
arr = np.array(arr)
print(np.reshape(arr,(3,3)))
| 15.666667 | 36 | 0.651064 | [
"MIT"
] | accidentalgenius09/competitive-programming-solution | HackerRank/PythonHackerRankSolutions/Numpy/ShapeandReshape.py | 235 | Python |
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import enzoTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(enzoTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
# Make sure the wallet isn't encrypted first
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
assert_equal(privkey[:1], "c")
assert_equal(len(privkey), 52)
# Encrypt the wallet
self.nodes[0].node_encrypt_wallet(passphrase)
self.start_node(0)
# Test that the wallet is encrypted
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
# Check that the timeout is right
time.sleep(2)
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wallet already unlocked
self.nodes[0].walletpassphrase(passphrase, 12000, True)
assert_raises_rpc_error(-17, "Wallet is already unlocked", self.nodes[0].walletpassphrase, passphrase, 100, True)
self.nodes[0].walletlock()
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
# Test timeout bounds
assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
# Check the timeout
# Check a time less than the limit
MAX_VALUE = 100000000
expected_time = int(time.time()) + MAX_VALUE - 600
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
# Check a time greater than the limit
expected_time = int(time.time()) + MAX_VALUE - 1
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
if __name__ == '__main__':
WalletEncryptionTest().main()
| 43.643678 | 138 | 0.704504 | [
"MIT"
] | EnzoNodes/ENZO | test/functional/wallet_encryption.py | 3,797 | Python |
#!/usr/bin/env python3
# Copyright (c) 2017 The Eurodollar Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import EurodollarTestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(EurodollarTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ["-deprecatedrpc=estimatefee", "-deprecatedrpc=createmultisig"]]
def run_test(self):
self.log.info("estimatefee: Shows deprecated message")
assert_raises_rpc_error(-32, 'estimatefee is deprecated', self.nodes[0].estimatefee, 1)
self.log.info("Using -deprecatedrpc=estimatefee bypasses the error")
self.nodes[1].estimatefee(1)
self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
if __name__ == '__main__':
DeprecatedRpcTest().main()
| 45.285714 | 123 | 0.73265 | [
"MIT"
] | watchdog1023/Eurodollar | test/functional/rpc_deprecated.py | 1,268 | Python |
#
# PySNMP MIB module CISCO-EVC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-EVC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:57:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
CiscoCosList, = mibBuilder.importSymbols("CISCO-TC", "CiscoCosList")
ifIndex, InterfaceIndexOrZero = mibBuilder.importSymbols("IF-MIB", "ifIndex", "InterfaceIndexOrZero")
VlanId, VlanIdOrNone = mibBuilder.importSymbols("Q-BRIDGE-MIB", "VlanId", "VlanIdOrNone")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, Unsigned32, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, IpAddress, MibIdentifier, TimeTicks, Gauge32, iso, ModuleIdentity, NotificationType, Counter64, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Unsigned32", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "IpAddress", "MibIdentifier", "TimeTicks", "Gauge32", "iso", "ModuleIdentity", "NotificationType", "Counter64", "Counter32")
RowStatus, TruthValue, MacAddress, StorageType, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TruthValue", "MacAddress", "StorageType", "DisplayString", "TextualConvention")
ciscoEvcMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 613))
ciscoEvcMIB.setRevisions(('2012-05-21 00:00', '2008-05-01 00:00', '2007-12-20 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoEvcMIB.setRevisionsDescriptions(('- Added following objects to cevcSITable: * cevcSICreationType * cevcSIType - Added following objects to cevcSIForwardBdTable: * cevcSIForwardBdNumberBase * cevcSIForwardBdNumber1kBitmap * cevcSIForwardBdNumber2kBitmap * cevcSIForwardBdNumber3kBitmap * cevcSIForwardBdNumber4kBitmap - Added MacSecurityViolation OID subtree and following objects: * cevcMacAddress * cevcMaxMacConfigLimit * cevcSIID - Deprecated cevcEvcNotificationGroup and added cevcEvcNotificationGroupRev1 and added cevcMacSecurityViolationNotification - Deprecated cevcSIGroup and added cevcSIGroupRev1 and added cevcSICreationType and cevcSIType - Deprecated cevcSIForwardGroup and added cevcSIForwardGroupRev1 and added the new objects mentioned in cevcSIForwardBdTable - Added CevcMacSecurityViolationCause Textual convention - Added new ciscoEvcMIBComplianceRev2', '- Added following enums to cevcSIOperStatus: * deleted(4) * errorDisabled(5) * unknown(6) - Added following named bits to cevcSIMatchEncapValid: * payloadTypes(3) * priorityCos(4) * dot1qNativeVlan(5) * dot1adNativeVlan(6) * encapExact(7) - The Object cevcSIMatchEncapPayloadType is replaced by new object cevcSIMatchEncapPayloadTypes to support multiple payload types for service instance match criteria. - Added new object cevcSIMatchEncapPriorityCos to cevcSIMatchEncapTable. - Added new Compliance ciscoEvcMIBComplianceRev1. - Added new Object Group cevcSIMatchCriteriaGroupRev1. - Miscellaneous updates/corrections.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoEvcMIB.setLastUpdated('201205210000Z')
if mibBuilder.loadTexts: ciscoEvcMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoEvcMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: [email protected]')
if mibBuilder.loadTexts: ciscoEvcMIB.setDescription("Metro Ethernet services can support a wide range of applications and subscriber needs easily, efficiently and cost-effectively. Using standard Ethernet interfaces, subscribers can set up secure, private Ethernet Virtual Connections, to connect their sites together and connect to business partners, suppliers and the Internet. This MIB module defines the managed objects and notifications describing Ethernet Virtual Connections. Ethernet Virtual Connections (EVC), are defined by the Metro Ethernet Forum (MEF), as an association between two or more UNIs. Frames within an EVC can only be exchanged among the associated UNIs. Frames sent into the MEN via a particular UNI must not be delivered back to the UNI from which it originated. Along an EVC path, there are demarcation flow points on associated ingress and egress interface, of every device, through which the EVC passes. A service instance represents these flow points where a service passes through an interface. From an operational perspective, a service instance serves three purposes: 1. Defines the instance of a particular EVC service on a specific interface and identifies all frames that belongs to that particular service/flow. 2. To provide the capability of applying the configured features to those frames belonging to the service. 3. To optionally define how to forward those frames in the data-path. The association of a service instance to an EVC depicts an instance of an Ethernet flow on a particular interface for an end-to-end (UNI-to-UNI) Ethernet service for a subscriber. The following diagram illustrates the association of EVC, UNIs and service instances. UNI physical ports are depicted as 'U', and service instances as 'x'. CE MEN MEN CE ------- ------- ------- ------- | | | | () | | | | | |--------Ux x|--( )--|x xU--------| | | | | | () | | | | ------- ------- ------- ------- ^ ^ | | -------- EVC --------- This MIB module addresses the functional areas of network management for EVC, including: The operational mode for interfaces that are providing Ethernet service(s). The service attributes regarding an interface behaving as UNI, such as CE-VLAN mapping and layer 2 control protocol (eg. stp, vtp, cdp) processing. The provisioning of service instances to define flow points for an Ethernet service. The operational status of EVCs for notifications of status changes, and EVC creation and deletion. Definition of terms and acronyms: B-Tag: Backbone Tag field in Ethernet 802.1ah frame CE: Customer Edge CE-VLAN: Customer Edge VLAN CoS: Class Of Service EVC: Ethernet Virtual Connection I-SID: Service Instance Identifier field in Ethernet 802.1ah frame MAC: Media Access Control MEN: Metro Ethernet Network NNI: Network to Network Interface OAM: Operations Administration and Management PPPoE: Point-to-Point Protocol over Ethernet Service frame: An Ethernet frame transmitted across the UNI toward the service provider or an Ethernet frame transmitted across the UNI toward the Subscriber. Service Instance: A flow point of an Ethernet service Service provider: The organization providing Ethernet service(s). Subscriber: The organization purchasing and/or using Ethernet service(s). UNI: User Network Interface The physical demarcation point between the responsibility of the service provider and the responsibility of the Subscriber. UNI-C: User Network Interface, subscriber side UNI-N: User Network Interface, service provider side VLAN: Virtual Local Area Network")
ciscoEvcMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 0))
ciscoEvcMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1))
ciscoEvcMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 2))
cevcSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 1))
cevcPort = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2))
cevcEvc = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3))
cevcServiceInstance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4))
cevcEvcNotificationConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 5))
cevcMacSecurityViolation = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6))
class CevcMacSecurityViolationCauseType(TextualConvention, Integer32):
description = "An integer value which identifies the cause for the MAC Security Violation. If the system MAC Address limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedSystemLimit' value. If the Bridge domain limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedBdLimit' value. If the Service Instance limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedSILimit' value. If the MAC address is present in the Black list then cevcMacSecurityViolationCauseType will contain 'blackListDeny' value."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("exceedSystemLimit", 1), ("exceedBdLimit", 2), ("exceedSILimit", 3), ("blackListDeny", 4))
class CiscoEvcIndex(TextualConvention, Unsigned32):
description = 'An integer-value which uniquely identifies the EVC.'
status = 'current'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4294967295)
class CiscoEvcIndexOrZero(TextualConvention, Unsigned32):
description = "This textual convention is an extension to textual convention 'CiscoEvcIndex'. It includes the value of '0' in addition to the range of 1-429496725. Value of '0' indicates that the EVC has been neither configured nor assigned."
status = 'current'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 4294967295)
class CevcL2ControlProtocolType(TextualConvention, Integer32):
description = "Defines the different types of layer 2 control protocols: 'other' None of the following. 'cdp' Cisco Discovery Protocol. 'dtp' Dynamic Trunking Protocol. 'pagp' Port Aggregration Protocol. 'udld' UniDirectional Link Detection. 'vtp' Vlan Trunking Protocol. 'lacp' Link Aggregation Control Protocol. 'dot1x' IEEE 802.1x 'stp' Spanning Tree Protocol."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))
namedValues = NamedValues(("other", 1), ("cdp", 2), ("dtp", 3), ("pagp", 4), ("udld", 5), ("vtp", 6), ("lacp", 7), ("dot1x", 8), ("stp", 9))
class ServiceInstanceTarget(TextualConvention, OctetString):
description = "Denotes a generic service instance target. An ServiceInstanceTarget value is always interpreted within the context of an ServiceInstanceTargetType value. Every usage of the ServiceInstanceTarget textual convention is required to specify the ServiceInstanceTargetType object which provides the context. It is suggested that the ServiceInstanceTargetType object is logically registered before the object(s) which use the ServiceInstanceTarget textual convention if they appear in the same logical row. The value of an ServiceInstanceTarget object must always be consistent with the value of the associated ServiceInstanceTargetType object. Attempts to set an ServiceInstanceTarget object to a value which is inconsistent with the associated ServiceInstanceTargetType must fail with an inconsistentValue error. When this textual convention is used as the syntax of an index object, there may be issues with the limit of 128 sub-identifiers specified in SMIv2, STD 58. In this case, the object definition MUST include a 'SIZE' clause to limit the number of potential instance sub-identifiers."
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 40)
class ServiceInstanceTargetType(TextualConvention, Integer32):
description = "Defines the type of interface/media to which a service instance is attached. 'other' None of the following. This value MUST be used if the value of the corresponding ServiceInstanceTarget object is a zero-length string. 'interface' Service instance is attached to the the interface defined by ServiceInstanceInterface textual convention. Each definition of a concrete ServiceInstanceTargetType value must be accompanied by a definition of a textual convention for use with that ServiceInstanceTargetType. To support future extensions, the ServiceInstanceTargetType textual convention SHOULD NOT be sub-typed in object type definitions. It MAY be sub-typed in compliance statements in order to require only a subset of these target types for a compliant implementation. Implementations must ensure that ServiceInstanceTargetType objects and any dependent objects (e.g. ServiceInstanceTarget objects) are consistent. An inconsistentValue error must be generated if an attempt to change an ServiceInstanceTargetType object would, for example, lead to an undefined ServiceInstanceTarget value. In particular, ServiceInstanceTargetType/ServiceInstanceTarget pairs must be changed together if the service instance taget type changes."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("other", 1), ("interface", 2))
class ServiceInstanceInterface(TextualConvention, OctetString):
description = "This textual convention indicates the ifIndex which identifies the interface that the service instance is attached, for which the corresponding ifType has the value of (but not limited to) 'ethernetCsmacd'. octets contents encoding 1-4 ifIndex network-byte order The corresponding ServiceInstanceTargetType value is interface(2)."
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
fixedLength = 4
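# A small sketch of the octet layout described for ServiceInstanceInterface above:
# the value is simply the ifIndex packed into four octets in network byte order.
# The helper names are illustrative and not part of the MIB.
def _exampleEncodeSiInterface(ifIndex):
    import struct
    return struct.pack('!I', ifIndex)           # e.g. 7 -> b'\x00\x00\x00\x07'
def _exampleDecodeSiInterface(octets):
    import struct
    return struct.unpack('!I', bytes(octets))[0]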
cevcMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcMacAddress.setStatus('current')
if mibBuilder.loadTexts: cevcMacAddress.setDescription('This object indicates the MAC Address which has violated the Mac security rules.')
cevcMaxMacConfigLimit = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6, 2), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cevcMaxMacConfigLimit.setStatus('current')
if mibBuilder.loadTexts: cevcMaxMacConfigLimit.setDescription('This object specifies the maximum MAC configuration limit. This is also sent as a part of MAC security violation notification. Every platform has their own forwarding table limitation. User can also set the maximum MAC configuration limit and if the limit set by user is not supported by platform then the object returns error.')
cevcSIID = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcSIID.setStatus('current')
if mibBuilder.loadTexts: cevcSIID.setDescription('This object indicates the service instance ID for the MAC security violation notification.')
cevcViolationCause = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6, 4), CevcMacSecurityViolationCauseType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcViolationCause.setStatus('current')
if mibBuilder.loadTexts: cevcViolationCause.setDescription("This object indicates the MAC security violation cause. When the system MAC Address limit is exceeded, the cevcMacSecurityViolationCause will contain 'exceedSystemLimit' value. When the Bridge domain limit is exceeded, the cevcMacSecurityViolationCause will contain 'exceedBdLimit' value. When the Service Instance limit is exceeded, the cevcMacSecurityViolationCause will contain 'exceedSILimit' value. If the MAC address is present in the Black list then cevcMacSecurityViolationCause will contain 'blackListDeny' value.")
cevcMaxNumEvcs = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcMaxNumEvcs.setStatus('current')
if mibBuilder.loadTexts: cevcMaxNumEvcs.setDescription('This object indicates the maximum number of EVCs that the system supports.')
cevcNumCfgEvcs = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcNumCfgEvcs.setStatus('current')
if mibBuilder.loadTexts: cevcNumCfgEvcs.setDescription('This object indicates the actual number of EVCs currently configured on the system.')
cevcPortTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1), )
if mibBuilder.loadTexts: cevcPortTable.setStatus('current')
if mibBuilder.loadTexts: cevcPortTable.setDescription("This table provides the operational mode and configuration limitations of the physical interfaces (ports) that provide Ethernet services for the MEN. This table has a sparse dependent relationship on the ifTable, containing a row for each ifEntry having an ifType of 'ethernetCsmacd' capable of supporting Ethernet services.")
cevcPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cevcPortEntry.setStatus('current')
if mibBuilder.loadTexts: cevcPortEntry.setDescription("This entry represents a port, a physical point, at which signals can enter or leave the network en route to or from another network to provide Ethernet services for the MEN. The system automatically creates an entry for each ifEntry in the ifTable having an ifType of 'ethernetCsmacd' capable of supporting Ethernet services and entries are automatically destroyed when the corresponding row in the ifTable is destroyed.")
cevcPortMode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("uni", 1), ("nni", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cevcPortMode.setStatus('current')
if mibBuilder.loadTexts: cevcPortMode.setDescription("Port denotes the physical interface which can provide Ethernet services. This object indicates the mode of the port and its operational behaviour in the MEN. 'uni' User Network Interface. The port resides on the interface between the end user and the network. Additional information related to the UNI is included in cevcUniTable. 'nni' Network to Network Interface. The port resides on the interface between two networks.")
cevcPortMaxNumEVCs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcPortMaxNumEVCs.setStatus('current')
if mibBuilder.loadTexts: cevcPortMaxNumEVCs.setDescription('This object indicates the maximum number of EVCs that the interface can support.')
cevcPortMaxNumServiceInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcPortMaxNumServiceInstances.setStatus('current')
if mibBuilder.loadTexts: cevcPortMaxNumServiceInstances.setDescription('This object indicates the maximum number of service instances that the interface can support.')
cevcUniTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2), )
if mibBuilder.loadTexts: cevcUniTable.setStatus('current')
if mibBuilder.loadTexts: cevcUniTable.setDescription("This table contains a list of UNIs locally configured on the system. This table has a sparse dependent relationship on the cevcPortTable, containing a row for each cevcPortEntry having a cevcPortMode column value 'uni'.")
cevcUniEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cevcUniEntry.setStatus('current')
if mibBuilder.loadTexts: cevcUniEntry.setDescription("This entry represents an UNI and its service attributes. The system automatically creates an entry when the system or the EMS/NMS creates a row in the cevcPortTable with a cevcPortMode of 'uni'. Likewise, the system automatically destroys an entry when the system or the EMS/NMS destroys the corresponding row in the cevcPortTable.")
cevcUniIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcUniIdentifier.setReference("MEF 16, 'Ethernet Local Management Interface (E-LMI)', January 2006")
if mibBuilder.loadTexts: cevcUniIdentifier.setStatus('current')
if mibBuilder.loadTexts: cevcUniIdentifier.setDescription('This object specifies a string-value assigned to a UNI for identification. When the UNI identifier is configured by the system or the EMS/NMS, it should be unique among all UNIs for the MEN. If the UNI identifier value is not specified, the value of the cevcUniIdentifier column is a zero-length string.')
cevcUniPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dot1q", 1), ("dot1ad", 2))).clone('dot1q')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcUniPortType.setStatus('current')
if mibBuilder.loadTexts: cevcUniPortType.setDescription("This object specifies the UNI port type. 'dot1q' The UNI port is an IEEE 802.1q port. 'dot1ad' The UNI port is an IEEE 802.1ad port.")
cevcUniServiceAttributes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2, 1, 3), Bits().clone(namedValues=NamedValues(("serviceMultiplexing", 0), ("bundling", 1), ("allToOneBundling", 2))).clone(namedValues=NamedValues(("serviceMultiplexing", 0), ("bundling", 1)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcUniServiceAttributes.setStatus('current')
if mibBuilder.loadTexts: cevcUniServiceAttributes.setDescription("This object specifies the UNI service attributes. 'serviceMultiplexing' This bit specifies whether the UNI supports multiple EVCs. Point-to-Point EVCs and Multipoint-to-Multipoint EVCs may be multiplexed in any combination at the UNI if this bit is set to '1'. 'bundling' This bit specifies whether the UNI has the bundling attribute configured. If this bit is set to '1', more than one CE-VLAN ID can map to a particular EVC at the UNI. 'allToOneBundling' This bit specifies whether the UNI has the all to one bundling attribute. If this bit is set to '1', all CE-VLAN IDs map to a single EVC at the UNI. To summarize the valid combinations of serviceMultiplexing(0), bundling(1) and allToOneBundling(2) bits for an UNI, consider the following diagram: VALID COMBINATIONS +---------------+-------+-------+-------+-------+-------+ |UNI ATTRIBUTES | 1 | 2 | 3 | 4 | 5 | +---------------+-------+------+------------------------+ |Service | | | | | | |Multiplexing | | Y | Y | | | | | | | | | | +---------------+-------+-------+-------+-------+-------+ | | | | | | | |Bundling | | | Y | Y | | | | | | | | | +---------------+-------+-------+-------+-------+-------+ |All to One | | | | | | |Bundling | | | | | Y | | | | | | | | +---------------+-------+-------+------ +-------+-------+")
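# Hedged sketch of reading the BITS value above when it is retrieved as a raw octet
# string: per the SNMP BITS encoding, bit 0 (serviceMultiplexing) is the most
# significant bit of the first octet. The helper name is illustrative only.
def _exampleUniServiceAttributes(octets):
    names = ('serviceMultiplexing', 'bundling', 'allToOneBundling')
    first = octets[0] if octets else 0
    return [name for bit, name in enumerate(names) if first & (0x80 >> bit)]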
cevcPortL2ControlProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 3), )
if mibBuilder.loadTexts: cevcPortL2ControlProtocolTable.setStatus('current')
if mibBuilder.loadTexts: cevcPortL2ControlProtocolTable.setDescription('This table lists the layer 2 control protocol processing attributes at UNI ports. This table has an expansion dependent relationship on the cevcUniTable, containing zero or more rows for each UNI.')
cevcPortL2ControlProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-EVC-MIB", "cevcPortL2ControlProtocolType"))
if mibBuilder.loadTexts: cevcPortL2ControlProtocolEntry.setStatus('current')
if mibBuilder.loadTexts: cevcPortL2ControlProtocolEntry.setDescription('This entry represents the layer 2 control protocol processing at the UNI. The system automatically creates an entry for each layer 2 control protocol type when an entry is created in the cevcUniTable, and entries are automatically destroyed when the system destroys the corresponding row in the cevcUniTable.')
cevcPortL2ControlProtocolType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 3, 1, 1), CevcL2ControlProtocolType())
if mibBuilder.loadTexts: cevcPortL2ControlProtocolType.setStatus('current')
if mibBuilder.loadTexts: cevcPortL2ControlProtocolType.setDescription('This object indicates the type of layer 2 control protocol service frame as denoted by the value of cevcPortL2ControlProtocolAction column.')
cevcPortL2ControlProtocolAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("discard", 1), ("peer", 2), ("passToEvc", 3), ("peerAndPassToEvc", 4))).clone('discard')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcPortL2ControlProtocolAction.setStatus('current')
if mibBuilder.loadTexts: cevcPortL2ControlProtocolAction.setDescription("This object specifies the action to be taken for the given layer 2 control protocol service frames which matches the cevcPortL2ControlProtocolType, including: 'discard' The port must discard all ingress service frames carrying the layer 2 control protocol service frames and the port must not generate any egress service frames carrying the layer 2 control protocol service frames. When this action is set at the port, an EVC cannot process the layer 2 control protocol service frames. 'peer' The port must act as a peer, meaning it actively participates with the Customer Equipment, in the operation of the layer 2 control protocol service frames. An example of this is port authentication service at the UNI with 802.1x or enhanced link OAM functionality by peering at the UNI with link OAM (IEEE 802.3ah). When this action is set at the port, an EVC cannot process the layer 2 control protocol service frames. 'passToEvc' The disposition of the service frames which are layer 2 control protocol service frames must be determined by the layer 2 control protocol action attribute of the EVC, (see cevcSIL2ControlProtocolAction for further details). 'peerAndPassToEvc' The layer 2 control protocol service frames will be peered at the port and also passed to one or more EVCs for tunneling. An example of this possibility is where an 802.1x authentication frame is peered at the UNI for UNI-based authentication, but also passed to a given EVC for customer end-to-end authentication.")
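# Sketch of addressing a single row of this table from a manager: the instance
# identifier is the ifIndex followed by the CevcL2ControlProtocolType value, so the
# action for CDP (2) on ifIndex 5 lives at cevcPortL2ControlProtocolAction.5.2.
# The helper name is an illustrative assumption.
def _exampleL2cpActionInstanceOid(ifIndex, protocolType):
    return (1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 3, 1, 2) + (ifIndex, protocolType)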
cevcUniCEVlanEvcTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4), )
if mibBuilder.loadTexts: cevcUniCEVlanEvcTable.setStatus('current')
if mibBuilder.loadTexts: cevcUniCEVlanEvcTable.setDescription('This table contains for each UNI, a list of EVCs and the association of CE-VLANs to the EVC. The CE-VLAN mapping is locally significant to the UNI. This table has an expansion dependent relationship on the cevcUniTable, containing zero or more rows for each UNI.')
cevcUniCEVlanEvcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-EVC-MIB", "cevcUniEvcIndex"), (0, "CISCO-EVC-MIB", "cevcUniCEVlanEvcBeginningVlan"))
if mibBuilder.loadTexts: cevcUniCEVlanEvcEntry.setStatus('current')
if mibBuilder.loadTexts: cevcUniCEVlanEvcEntry.setDescription('This entry represents an EVC and the CE-VLANs that are mapped to it at an UNI. For example, if CE-VLANs 10, 20-30, 40 are mapped to an EVC indicated by cevcUniEvcIndex = 1, at an UNI with ifIndex = 2, this table will contain following rows to represent above CE-VLAN map: cevcUniCEVlanEvcEndingVlan.2.1.10 = 0 cevcUniCEVlanEvcEndingVlan.2.1.20 = 30 cevcUniCEVlanEvcEndingVlan.2.1.40 = 0 The system automatically creates an entry when the system creates an entry in the cevcUniTable and an entry is created in cevcSICEVlanTable for a service instance which is attached to an EVC on this UNI. Likewise, the system automatically destroys an entry when the system or the EMS/NMS destroys the corresponding row in the cevcUniTable or in the cevcSICEVlanTable.')
cevcUniEvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4, 1, 1), CiscoEvcIndex())
if mibBuilder.loadTexts: cevcUniEvcIndex.setStatus('current')
if mibBuilder.loadTexts: cevcUniEvcIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies the EVC attached at an UNI.')
cevcUniCEVlanEvcBeginningVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4, 1, 2), VlanId())
if mibBuilder.loadTexts: cevcUniCEVlanEvcBeginningVlan.setStatus('current')
if mibBuilder.loadTexts: cevcUniCEVlanEvcBeginningVlan.setDescription("If cevcUniCEVlanEvcEndingVlan is '0', then this object indicates a single VLAN in the list. If cevcUniCEVlanEvcEndingVlan is not '0', then this object indicates the first VLAN in a range of VLANs.")
cevcUniCEVlanEvcEndingVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4, 1, 3), VlanIdOrNone()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcUniCEVlanEvcEndingVlan.setStatus('current')
if mibBuilder.loadTexts: cevcUniCEVlanEvcEndingVlan.setDescription("This object indicates the last VLAN in a range of VLANs. If the row does not describe a range, then the value of this column must be '0'.")
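# Sketch of flattening the CE-VLAN map rows described above: each row carries the
# beginning VLAN in its index and an ending VLAN of 0 for a single VLAN, or a
# non-zero ending VLAN for a range. The helper name and input shape are assumptions.
def _exampleExpandCeVlanMap(rows):
    # rows: iterable of (beginningVlan, endingVlan) pairs, e.g. [(10, 0), (20, 30), (40, 0)]
    vlans = []
    for beginning, ending in rows:
        vlans.extend(range(beginning, (ending or beginning) + 1))
    return vlans                                 # -> [10, 20, 21, ..., 30, 40]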
cevcEvcTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1), )
if mibBuilder.loadTexts: cevcEvcTable.setStatus('current')
if mibBuilder.loadTexts: cevcEvcTable.setDescription('This table contains a list of EVCs and their service attributes.')
cevcEvcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcEvcIndex"))
if mibBuilder.loadTexts: cevcEvcEntry.setStatus('current')
if mibBuilder.loadTexts: cevcEvcEntry.setDescription("This entry represents the EVC configured on the system and its service attributes. Entries in this table may be created and deleted via the cevcEvcRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcEvcRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcEvcRowStatus column to 'destroy'.")
cevcEvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 1), CiscoEvcIndex())
if mibBuilder.loadTexts: cevcEvcIndex.setStatus('current')
if mibBuilder.loadTexts: cevcEvcIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies the EVC.')
cevcEvcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcEvcRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcEvcTable. cevcEvcIdentifier column must have a valid value before a row can be set to 'active'. Writable objects in this table can be modified while the value of cevcEvcRowStatus column is 'active'. An entry cannot be deleted if there exists a service instance which is referring to the cevcEvcEntry i.e. cevcSIEvcIndex in the cevcSITable has the same value as cevcEvcIndex being deleted.")
cevcEvcStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcEvcStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcEvcIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 100))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcIdentifier.setReference("MEF 16, 'Ethernet Local Management Interface (E-LMI)', January 2006")
if mibBuilder.loadTexts: cevcEvcIdentifier.setStatus('current')
if mibBuilder.loadTexts: cevcEvcIdentifier.setDescription('This object specifies a string-value identifying the EVC. This value should be unique across the MEN.')
cevcEvcType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("pointToPoint", 1), ("multipointToMultipoint", 2))).clone('pointToPoint')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcType.setStatus('current')
if mibBuilder.loadTexts: cevcEvcType.setDescription("This object specifies the type of EVC: 'pointToPoint' Exactly two UNIs are associated with one another. An ingress service frame at one UNI must not result in an egress service frame at a UNI other than the other UNI in the EVC. 'multipointToMultipoint' Two or more UNIs are associated with one another. An ingress service frame at one UNI must not result in an egress service frame at a UNI that is not in the EVC.")
cevcEvcCfgUnis = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(2, 4294967295)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcCfgUnis.setStatus('current')
if mibBuilder.loadTexts: cevcEvcCfgUnis.setDescription("This object specifies the number of UNIs expected to be configured for the EVC in the MEN. The underlying OAM protocol can use this value of UNIs to determine the EVC operational status, cevcEvcOperStatus. For a Multipoint-to-Multipoint EVC, the minimum number of UNIs would be two.")
cevcEvcStateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 2), )
if mibBuilder.loadTexts: cevcEvcStateTable.setStatus('current')
if mibBuilder.loadTexts: cevcEvcStateTable.setDescription('This table lists statistical/status data of the EVC. This table has a one-to-one dependent relationship on the cevcEvcTable, containing a row for each EVC.')
cevcEvcStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 2, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcEvcIndex"))
if mibBuilder.loadTexts: cevcEvcStateEntry.setStatus('current')
if mibBuilder.loadTexts: cevcEvcStateEntry.setDescription('This entry represents status attributes of an EVC. The system automatically creates an entry when the system or the EMS/NMS creates a row in the cevcEvcTable. Likewise, the system automatically destroys an entry when the system or the EMS/NMS destroys the corresponding row in the cevcEvcTable.')
cevcEvcOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("active", 2), ("partiallyActive", 3), ("inactive", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcOperStatus.setStatus('current')
if mibBuilder.loadTexts: cevcEvcOperStatus.setDescription("This object specifies the operational status of the EVC: 'unknown' Not enough information available regarding the EVC to determine the operational status at this time or EVC operational status is undefined. 'active' Fully operational between the UNIs in the EVC. 'partiallyActive' Capable of transferring traffic among some but not all of the UNIs in the EVC. This operational status is applicable only for Multipoint-to-Multipoint EVCs. 'inactive' Not capable of transferring traffic among any of the UNIs in the EVC. This value is derived from data gathered by underlying OAM protocol.")
cevcEvcActiveUnis = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 2, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcActiveUnis.setStatus('current')
if mibBuilder.loadTexts: cevcEvcActiveUnis.setDescription('This object indicates the number of active UNIs for the EVC in the MEN. This value is derived from data gathered by underlying OAM Protocol.')
cevcEvcUniTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3), )
if mibBuilder.loadTexts: cevcEvcUniTable.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniTable.setDescription("This table contains a list of UNIs for each EVC configured on the device. The UNIs can be local (i.e. physically located on the system) or remote (i.e. not physically located on the device). For local UNIs, the UNI Id is the same as denoted by cevcUniIdentifier with the same ifIndex value as cevcEvcLocalUniIfIndex. For remote UNIs, the underlying OAM protocol, if capable, provides the UNI Id via its protocol messages. This table has an expansion dependent relationship on the cevcEvcTable, containing a row for each UNI that is in the EVC.")
cevcEvcUniEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcEvcIndex"), (0, "CISCO-EVC-MIB", "cevcEvcUniIndex"))
if mibBuilder.loadTexts: cevcEvcUniEntry.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniEntry.setDescription('This entry represents a UNI, either local or remote, in the EVC. The system automatically creates an entry, when an UNI is attached to the EVC. Entries are automatically destroyed when the system or the EMS/NMS destroys the corresponding row in the cevcEvcTable or when an UNI is removed from the EVC.')
cevcEvcUniIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cevcEvcUniIndex.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies the UNI in an EVC.')
cevcEvcUniId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcUniId.setReference('MEF 16, Ethernet Local Management Interface (E-LMI), January 2006')
if mibBuilder.loadTexts: cevcEvcUniId.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniId.setDescription('This object indicates the string-value identifying the UNI that is in the EVC. For UNI that is local, this value is the same as cevcUniIdentifier for the same ifIndex value as cevcEvcLocalUniIfIndex. For UNI that is not on the system, this value may be derived from the underlying OAM protocol. If the UNI identifier value is not specified for the UNI or it is unknown, the value of the cevcEvcUniId column is a zero-length string.')
cevcEvcUniOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("unknown", 1), ("notReachable", 2), ("up", 3), ("down", 4), ("adminDown", 5), ("localExcessiveError", 6), ("remoteExcessiveError", 7), ("localInLoopback", 8), ("remoteInLoopback", 9), ("localOutLoopback", 10), ("remoteOutLoopback", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcUniOperStatus.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniOperStatus.setDescription("This object indicates the operational status derived from data gathered by the OAM protocol for an UNI. 'unknown' Status is not known; a possible reason could be that the OAM protocol has not provided information regarding the UNI. 'notReachable' UNI is not reachable; a possible reason could be that OAM protocol messages have not been received for an excessive length of time. 'up' UNI is active, up, and able to pass traffic. 'down' UNI is down and not passing traffic. 'adminDown' UNI has been administratively put in down state. 'localExcessiveError' UNI has experienced excessive number of invalid frames on the local end of the physical link between UNI-C and UNI-N. 'remoteExcessiveError' UNI has experienced excessive number of invalid frames on the remote side of the physical connection between UNI-C and UNI-N. 'localInLoopback' UNI is looped back on the local end of the physical link between UNI-C and UNI-N. 'remoteInLoopback' UNI is looped back on the remote end of the link between UNI-C and UNI-N. 'localOutLoopback' UNI just transitioned out of loopback on the local end of the physical link between UNI-C and UNI-N. 'remoteOutLoopback' UNI just transitioned out of loopback on the remote end of the physical link between UNI-C and UNI-N.")
cevcEvcLocalUniIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1, 4), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcLocalUniIfIndex.setStatus('current')
if mibBuilder.loadTexts: cevcEvcLocalUniIfIndex.setDescription("When the UNI is local on the system, this object specifies the ifIndex of the UNI. The value '0' of this column indicates remote UNI.")
cevcSITable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1), )
if mibBuilder.loadTexts: cevcSITable.setStatus('current')
if mibBuilder.loadTexts: cevcSITable.setDescription('This table lists each service instance and its service attributes.')
cevcSIEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"))
if mibBuilder.loadTexts: cevcSIEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIEntry.setDescription("This entry represents a service instance configured on the system and its service attributes. Entries in this table may be created and deleted via the cevcSIRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcSIRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcSIRowStatus column to 'destroy'.")
cevcSIIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cevcSIIndex.setStatus('current')
if mibBuilder.loadTexts: cevcSIIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies a service instance. An implementation MAY assign an ifIndex-value assigned to the service instance to cevcSIIndex.')
cevcSIRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSITable. This object cannot be set to 'active' until following corresponding objects are assigned to valid values: - cevcSITargetType - cevcSITarget - cevcSIName - cevcSIType Following writable objects in this table cannot be modified while the value of cevcSIRowStatus is 'active': - cevcSITargetType - cevcSITarget - cevcSIName - cevcSIType Objects in this table and all other tables that have the same cevcSIIndex value as an index disappear when cevcSIRowStatus is set to 'destroy'.")
cevcSIStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSITargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 4), ServiceInstanceTargetType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSITargetType.setStatus('current')
if mibBuilder.loadTexts: cevcSITargetType.setDescription('This object indicates the type of interface/media to which a service instance has an attachment.')
cevcSITarget = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 5), ServiceInstanceTarget()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSITarget.setStatus('current')
if mibBuilder.loadTexts: cevcSITarget.setDescription('This object indicates the target to which a service instance has an attachment. If the target is unknown, the value of the cevcSITarget column is a zero-length string.')
cevcSIName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 6), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIName.setStatus('current')
if mibBuilder.loadTexts: cevcSIName.setDescription("The textual name of the service instance. The value of this column should be the name of the component as assigned by the local interface/media type and should be suitable for use in commands entered at the device's 'console'. This might be a text name, such as 'si1', or a simple service instance number, such as '1', depending on the interface naming syntax of the device. If there is no local name or this object is otherwise not applicable, then this object contains a zero-length string.")
cevcSIEvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 7), CiscoEvcIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIEvcIndex.setStatus('current')
if mibBuilder.loadTexts: cevcSIEvcIndex.setDescription("This object specifies the EVC Index that the service instance is associated. The value of '0' this column indicates that the service instance is not associated to an EVC. If the value of cevcSIEvcIndex column is not '0', there must exist an active row in the cevcEvcTable with the same index value for cevcEvcIndex.")
cevcSIAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2))).clone('up')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIAdminStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIAdminStatus.setDescription("This object specifies the desired state of the Service Instance. 'up' Ready to transfer traffic. When a system initializes, all service instances start with this state. 'down' The service instance is administratively down and is not capable of transferring traffic.")
cevcSIForwardingType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("other", 0), ("bridgeDomain", 1)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardingType.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardingType.setDescription("This object indicates technique used by a service instance to forward service frames. 'other' If the forwarding behavior of a service instance is not defined or unknown, this object is set to other(0). 'bridgeDomain' Bridge domain is used to forward service frames by a service instance. If cevcSIForwardingType is 'bridgeDomain(1)', there must exist an active row in the cevcSIForwardBdTable with the same index value of cevcSIIndex. The object cevcSIForwardBdNumber indicates the identifier of the bridge domain component being used.")
cevcSICreationType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("static", 1), ("dynamic", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcSICreationType.setStatus('current')
if mibBuilder.loadTexts: cevcSICreationType.setDescription("This object specifies whether the service instance created is statically configured by the user or is dynamically created. 'static' If the service instance is configured manually this object is set to static(1). 'dynamic' If the service instance is created dynamically by the first sign of life of an Ethernet frame, then this object is set to dynamic(2) for the service instance.")
cevcSIType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("regular", 1), ("trunk", 2), ("l2context", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIType.setStatus('current')
if mibBuilder.loadTexts: cevcSIType.setDescription("This object specifies the type of the service instance. It mentions if the service instance is either a regular or trunk or l2context service instance. 'regular' If a service instance is configured without any type specified, then it is a regular service instance. 'trunk' If the service instance is configured with trunk type, then it is a trunk service instance. For a trunk service instance, its Bridge domain IDs are derived from encapsulation VLAN plus an optional offset (refer cevcSIForwardBdNumberBase object). 'l2context' If the service instance is configured with dynamic type, then it is a L2 context service instance. The Ethernet L2 Context is a statically configured service instance which contains the Ethernet Initiator for attracting the first sign of life. In other words, Ethernet L2 Context service instance is used for catching the first sign of life of Ethernet frames to create dynamic Ethernet sessions service instances.")
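# The trunk case above says bridge-domain IDs are derived from the encapsulation
# VLAN plus an optional offset (see cevcSIForwardBdNumberBase). A minimal sketch of
# that derivation, assuming the offset defaults to zero when no base is configured;
# the helper name is illustrative.
def _exampleTrunkBridgeDomain(encapVlan, bdNumberBase=0):
    return bdNumberBase + encapVlan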
cevcSIStateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 2), )
if mibBuilder.loadTexts: cevcSIStateTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIStateTable.setDescription('This table lists statistical/status data of the service instance. This table has a one-to-one dependent relationship on the cevcSITable, containing a row for each service instance.')
cevcSIStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 2, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"))
if mibBuilder.loadTexts: cevcSIStateEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIStateEntry.setDescription('This entry represents the operational status of a service instance. The system automatically creates an entry when the system or the EMS/NMS creates a row in the cevcSITable. Likewise, the system automatically destroys an entry when the system or the EMS/NMS destroys the corresponding row in the cevcSITable.')
cevcSIOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("adminDown", 3), ("deleted", 4), ("errorDisabled", 5), ("unknown", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcSIOperStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIOperStatus.setDescription("This object indicates the operational status of the Service Instance. 'up' Service instance is fully operational and able to transfer traffic. 'down' Service instance is down and not capable of transferring traffic, and is not administratively configured to be down by management system. 'adminDown' Service instance has been explicitly configured to administratively down by a management system and is not capable of transferring traffic. 'deleted' Service instance has been deleted. 'errorDisabled' Service instance has been shut down due to MAC security violations. 'unknown' Operational status of service instance is unknown or undefined.")
cevcSIVlanRewriteTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3), )
if mibBuilder.loadTexts: cevcSIVlanRewriteTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteTable.setDescription("This table lists the rewrite adjustments of the service frame's VLAN tags for service instances. This table has an expansion dependent relationship on the cevcSITable, containing a row for a VLAN adjustment for ingress and egress frames at each service instance.")
cevcSIVlanRewriteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIVlanRewriteDirection"))
if mibBuilder.loadTexts: cevcSIVlanRewriteEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteEntry.setDescription('Each entry represents the VLAN adjustment for a Service Instance.')
cevcSIVlanRewriteDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ingress", 1), ("egress", 2))))
if mibBuilder.loadTexts: cevcSIVlanRewriteDirection.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteDirection.setDescription("This object specifies the VLAN adjustment for 'ingress' frames or 'egress' frames on the service instance.")
cevcSIVlanRewriteRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIVlanRewriteTable. cevcSIVlanRewriteAction and cevcSIVlanRewriteEncapsulation must have valid values before this object can be set to 'active'. Writable objects in this table can be modified while the value of cevcSIVlanRewriteRowStatus column is 'active'.")
cevcSIVlanRewriteStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSIVlanRewriteAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("push1", 1), ("push2", 2), ("pop1", 3), ("pop2", 4), ("translate1To1", 5), ("translate1To2", 6), ("translate2To1", 7), ("translate2To2", 8)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteAction.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteAction.setDescription("This object specifies the rewrite action the device performs for the service instance, including: 'push1' Add cevcSIVlanRewriteVlan1 as the VLAN tag to the service frame. 'push2' Add cevcSIVlanRewriteVlan1 as the outer VLAN tag and cevcSIVlanRewriteVlan2 as the inner VLAN tag of the service frame. 'pop1' Remove the outermost VLAN tag from the service frame. 'pop2' Remove the two outermost VLAN tags from the service frame. 'translate1To1' Replace the outermost VLAN tag with the cevcSIVlanRewriteVlan1 tag. 'translate1To2' Replace the outermost VLAN tag with cevcSIVlanRewriteVlan1 and add cevcSIVlanRewriteVlan2 to the second VLAN tag of the service frame. 'translate2To1' Remove the outermost VLAN tag and replace the second VLAN tag with cevcSIVlanRewriteVlan1. 'translate2To2' Replace the outermost VLAN tag with cevcSIVlanRewriteVlan1 and the second VLAN tag with cevcSIVlanRewriteVlan2.")
cevcSIVlanRewriteEncapsulation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dot1q", 1), ("dot1ad", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteEncapsulation.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteEncapsulation.setDescription("This object specifies the encapsulation type to process for the service instance. 'dot1q' The IEEE 802.1q encapsulation. 'dot1ad' The IEEE 802.1ad encapsulation.")
cevcSIVlanRewriteVlan1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 6), VlanId()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteVlan1.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteVlan1.setDescription("This object specifies the outermost VLAN ID tag of the frame for the service instance. This object is valid only when cevcSIVlanRewriteAction is 'push1', 'push2', 'translate1To1', 'translate1To2', 'translate2To1', or 'translate2To2'.")
cevcSIVlanRewriteVlan2 = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 7), VlanId()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteVlan2.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteVlan2.setDescription("This object specifies the second VLAN ID tag of the frame for the service instance. This object is valid only when cevcSIVlanRewriteAction is 'push2', 'translate1To2', or 'translate2To2'.")
cevcSIVlanRewriteSymmetric = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 8), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteSymmetric.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteSymmetric.setDescription("This object is valid only when cevcSIVlanRewriteDirection is 'ingress'. The value 'true' of this column specifies that egress packets are tagged with a VLAN specified by an active row in cevcSIPrimaryVlanTable. There could only be one VLAN value assigned in the cevcSIPrimaryVlanTable, i.e. only one 'active' entry that has the same index value of cevcSIIndex column and corresponding instance of cevcSIPrimaryVlanEndingVlan column has value '0'.")
cevcSIL2ControlProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 4), )
if mibBuilder.loadTexts: cevcSIL2ControlProtocolTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIL2ControlProtocolTable.setDescription('This table lists the layer 2 control protocol processing attributes at service instances. This table has an expansion dependent relationship on the cevcSITable, containing a row for each layer 2 control protocol disposition at each service instance.')
cevcSIL2ControlProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 4, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIL2ControlProtocolType"))
if mibBuilder.loadTexts: cevcSIL2ControlProtocolEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIL2ControlProtocolEntry.setDescription('This entry represents the layer 2 control protocol processing at a service instance. The system automatically creates an entry for each layer 2 control protocol type when an entry is created in the cevcSITable, and entries are automatically destroyed when the system destroys the corresponding row in the cevcSITable.')
cevcSIL2ControlProtocolType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 4, 1, 1), CevcL2ControlProtocolType())
if mibBuilder.loadTexts: cevcSIL2ControlProtocolType.setStatus('current')
if mibBuilder.loadTexts: cevcSIL2ControlProtocolType.setDescription('The layer 2 control protocol service frame that the service instance is to process as defined by object cevcSIL2ControlProtocolAction.')
cevcSIL2ControlProtocolAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("discard", 1), ("tunnel", 2), ("forward", 3))).clone('discard')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIL2ControlProtocolAction.setStatus('current')
if mibBuilder.loadTexts: cevcSIL2ControlProtocolAction.setDescription("The actions to be taken for the layer 2 control protocol service frames that match cevcSIL2ControlProtocolType, including: 'discard' The MEN must discard all ingress service frames carrying the layer 2 control protocol service frames on the EVC and the MEN must not generate any egress service frames carrying the layer 2 control protocol frames on the EVC. 'tunnel' Forward the layer 2 control protocol service frames with the MAC address changed as defined by the individual layer 2 control protocol. The EVC does not process the layer 2 protocol service frames. If a layer 2 control protocol service frame is to be tunneled, all the UNIs in the EVC must be configured to pass the layer 2 control protocol service frames to the EVC, i.e. the cevcPortL2ControlProtocolAction column has the value of 'passToEvc' or 'peerAndPassToEvc'. 'forward' Forward the layer 2 control protocol service frames as data; similar to tunnel but layer 2 control protocol service frames are forwarded without changing the MAC address.")
cevcSICEVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5), )
if mibBuilder.loadTexts: cevcSICEVlanTable.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanTable.setDescription('This table contains the CE-VLAN map list for each Service Instance. This table has an expansion dependent relationship on the cevcSITable, containing a row for each CE-VLAN or a range of CE-VLANs that are mapped to a service instance.')
cevcSICEVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSICEVlanBeginningVlan"))
if mibBuilder.loadTexts: cevcSICEVlanEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanEntry.setDescription("This entry contains the CE-VLANs that are mapped at a Service Instance. Entries in this table may be created and deleted via the cevcSICEVlanRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcSICEVlanRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcSICEVlanRowStatus column to 'destroy'.")
cevcSICEVlanBeginningVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1, 1), VlanId())
if mibBuilder.loadTexts: cevcSICEVlanBeginningVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanBeginningVlan.setDescription("If cevcSICEVlanEndingVlan is '0', then this object indicates a single VLAN in the list. If cevcSICEVlanEndingVlan is not '0', then this object indicates the first VLAN in a range of VLANs.")
cevcSICEVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSICEVlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSICEVlanTable. This object cannot be set to 'active' until all objects have been assigned valid values. Writable objects in this table can be modified while the value of the cevcSICEVlanRowStatus column is 'active'.")
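# A hedged sketch (editor-added, not generated by pysmi) of how an SNMP manager
# might create a row in cevcSICEVlanTable through the RowStatus column described
# above, using the 'createAndGo' (4) convention.  The agent address, community
# string, and index values (cevcSIIndex=1, cevcSICEVlanBeginningVlan=100) are
# illustrative assumptions only; it relies on the standard pysnmp hlapi setCmd.
def _exampleCreateCEVlanRow(agent='192.0.2.1', community='private'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              Integer32, setCmd)
    errorIndication, errorStatus, errorIndex, varBinds = next(setCmd(
        SnmpEngine(), CommunityData(community), UdpTransportTarget((agent, 161)),
        ContextData(),
        # RowStatus 'createAndGo' (4) creates and activates the row in one SET
        ObjectType(ObjectIdentity('CISCO-EVC-MIB', 'cevcSICEVlanRowStatus', 1, 100),
                   Integer32(4))))
    return errorIndication or errorStatus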
cevcSICEVlanStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSICEVlanStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSICEVlanEndingVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1, 4), VlanIdOrNone()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSICEVlanEndingVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanEndingVlan.setDescription("This object indicates the last VLAN in a range of VLANs. If the row does not describe a range, then the value of this column must be '0'.")
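# A small illustrative helper (an editor sketch, not a MIB object) applying the
# range convention described above: when the ending VLAN column is '0' the row
# names a single VLAN, otherwise it names the inclusive range beginning..ending.
# The same convention is used by the primary and secondary VLAN tables below.
def _expandVlanRow(beginningVlan, endingVlan):
    """Return the list of VLAN IDs covered by one beginning/ending VLAN row."""
    if endingVlan == 0:
        return [beginningVlan]
    return list(range(beginningVlan, endingVlan + 1))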
cevcSIMatchCriteriaTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6), )
if mibBuilder.loadTexts: cevcSIMatchCriteriaTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaTable.setDescription('This table contains the match criteria for each Service Instance. This table has an expansion dependent relationship on the cevcSITable, containing a row for each group of match criteria of each service instance.')
cevcSIMatchCriteriaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIMatchCriteriaIndex"))
if mibBuilder.loadTexts: cevcSIMatchCriteriaEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaEntry.setDescription("This entry represents a group of match criteria for a service instance. Each entry in the table with the same cevcSIIndex and different cevcSIMatchCriteriaIndex represents an OR operation of the match criteria for the service instance. Entries in this table may be created and deleted via the cevcSIMatchRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcSIMatchRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcSIMatchRowStatus column to 'destroy'.")
cevcSIMatchCriteriaIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cevcSIMatchCriteriaIndex.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies a match criteria for a service instance.')
cevcSIMatchRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIMatchCriteriaTable. If the value of cevcSIMatchCriteriaType column is 'dot1q' or 'dot1ad' or 'untaggedAndDot1q' or 'untaggedAndDot1ad', then cevcSIMatchRowStatus can not be set to 'active' until there exists an active row in the cevcSIMatchEncapTable with the same index value for cevcSIIndex and cevcSIMatchCriteriaIndex. Writable objects in this table can be modified while the value of the cevcSIMatchRowStatus column is 'active'.")
cevcSIMatchStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSIMatchCriteriaType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("unknown", 1), ("dot1q", 2), ("dot1ad", 3), ("untagged", 4), ("untaggedAndDot1q", 5), ("untaggedAndDot1ad", 6), ("priorityTagged", 7), ("defaultTagged", 8)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchCriteriaType.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaType.setDescription("This object specifies the criteria used to match a service instance. 'unknown' Match criteria for the service instance is not defined or unknown. 'dot1q' The IEEE 802.1q encapsulation is used as a match criteria for the service instance. The ether type value of the IEEE 802.1q tag is specified by the object cevcSIMatchEncapEncapsulation with the same index value of cevcSIIndex and cevcSIMatchCriteriaIndex. 'dot1ad' The IEEE 802.1ad encapsulation is used as a match criteria for the service instance. The ether type value of the IEEE 802.1ad tag is specified by the cevcSIMatchEncapEncapsulation column with the same index value of cevcSIIndex and cevcSIMatchCriteriaIndex. 'untagged' Service instance processes untagged service frames. Only one service instance on the interface/media type can use untagged frames as a match criteria. 'untaggedAndDot1q' Both untagged frames and the IEEE 802.1q encapsulation are used as a match criteria for the service instance. Only one service instance on the interface/media type can use untagged frames as a match criteria. The ether type value of the IEEE 802.1q tag is specified by the cevcSIMatchEncapEncapsulation column with the same index value of cevcSIIndex and cevcSIMatchCriteriaIndex. 'untaggedAndDot1ad' Both untagged frames and the IEEE 802.1ad encapsulation are used as a match criteria for the service instance. Only one service instance on the interface/media type can use untagged frames as a match criteria. The ether type value of the IEEE 802.1ad tag is specified by the cevcSIMatchEncapEncapsulation column with the same index value of cevcSIIndex and cevcSIMatchCriteriaIndex. 'priorityTagged' Service instance processes priority tagged frames. Only one service instance on the interface/media type can use priority tagged frames as a match criteria. 'defaultTagged' Service instance is a default service instance. The default service instance processes frames with VLANs that do not match to any other service instances configured on the interface/media type. Only one service instance on the interface/media type can be the default service instance.")
cevcSIMatchEncapTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7), )
if mibBuilder.loadTexts: cevcSIMatchEncapTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapTable.setDescription("This table contains the encapsulation based match criteria for each service instance. This table has a sparse dependent relationship on the cevcSIMatchCriteriaTable, containing a row for each match criteria having one of the following values for cevcSIMatchCriteriaType: - 'dot1q' - 'dot1ad' - 'untaggedAndDot1q' - 'untaggedAndDot1ad' - 'priorityTagged'")
cevcSIMatchEncapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIMatchCriteriaIndex"))
if mibBuilder.loadTexts: cevcSIMatchEncapEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapEntry.setDescription("This entry represents a group of encapsulation match criteria for a service instance. Entries in this table may be created and deleted via the cevcSIMatchEncapRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcSIMatchEncapRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcSIMatchEncapRowStatus column to 'destroy'.")
cevcSIMatchEncapRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIMatchEncapTable. This object cannot be set to 'active' until cevcSIMatchEncapEncapsulation and objects referred to by cevcSIMatchEncapValid have been assigned their respective valid values. Writable objects in this table can be modified while the value of the cevcSIMatchEncapRowStatus column is 'active'.")
cevcSIMatchEncapStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 2), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSIMatchEncapValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 3), Bits().clone(namedValues=NamedValues(("primaryCos", 0), ("secondaryCos", 1), ("payloadType", 2), ("payloadTypes", 3), ("priorityCos", 4), ("dot1qNativeVlan", 5), ("dot1adNativeVlan", 6), ("encapExact", 7)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapValid.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapValid.setDescription("This object specifies the encapsulation criteria used to match a service instance. 'primaryCos' The 'primaryCos' bit set to '1' specifies the Class of Service is used as service match criteria for the service instance. When this bit is set to '1' there must exist at least one active row in the cevcSIPrimaryVlanTable which has the same index values of cevcSIIndex and cevcSIMatchCriteriaIndex. When 'primaryCos' bit is '1', the cevcSIMatchEncapPrimaryCos column indicates the CoS value(s). 'secondaryCos' The 'secondaryCos' bit set to '1' specifies the Class of Service is used as service match criteria for the service instance. When this bit is set to '1' there must exist at least one active row in the cevcSISecondaryVlanTable which has the same index values of cevcSIIndex and cevcSIMatchCriteriaIndex. When 'secondaryCos' bit is '1', the cevcSIMatchEncapSecondaryCos column indicates the CoS value(s). 'payloadType' This bit set to '1' specifies that the value of corresponding instance of cevcSIMatchEncapPayloadType is used as service match criteria for the service instance. 'payloadTypes' This bit set to '1' specifies that the value of corresponding instance of cevcSIMatchEncapPayloadTypes is used as service match criteria for the service instance. 'priorityCos' This bit set to '1' specifies that the value of corresponding instance of cevcSIMatchEncapPriorityCos is used as service match criteria for the service instance. 'dot1qNativeVlan' This bit set to '1' specifies that the IEEE 802.1q tag with native vlan is used as service match criteria for the service instance. 'dot1adNativeVlan' This bit set to '1' specifies that the IEEE 802.1ad tag with native vlan is used as service match criteria for the service instance. 'encapExact' This bit set to '1' specifies that a service frame is mapped to the service instance only if it matches exactly to the encapsulation specified by the service instance.")
cevcSIMatchEncapEncapsulation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("dot1qEthertype0x8100", 1), ("dot1qEthertype0x9100", 2), ("dot1qEthertype0x9200", 3), ("dot1qEthertype0x88A8", 4), ("dot1adEthertype0x88A8", 5), ("dot1ahEthertype0x88A8", 6)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapEncapsulation.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapEncapsulation.setDescription("This object specifies the encapsulation type used as service match criteria. The object also specifies the Ethertype for egress packets on the service instance. 'dot1qEthertype0x8100' The IEEE 802.1q encapsulation with ether type value 0x8100. 'dot1qEthertype0x9100' The IEEE 802.1q encapsulation with ether type value 0x9100. 'dot1qEthertype0x9200' The IEEE 802.1q encapsulation with ether type value 0x9200. 'dot1qEthertype0x88A8' The IEEE 802.1q encapsulation with ether type value 0x88A8. 'dot1adEthertype0x88A8' The IEEE 802.1ad encapsulation with ether type value 0x88A8. 'dot1ahEthertype0x88A8' The IEEE 802.1ah encapsulation with ether type value 0x88A8.")
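# An illustrative lookup (a plain Python dict added as an editor sketch, not a MIB
# object) pairing each cevcSIMatchEncapEncapsulation value with the tag type and
# Ethertype it denotes in the description above.
_ENCAP_ETHERTYPE = {
    1: ('dot1q', 0x8100),   # dot1qEthertype0x8100
    2: ('dot1q', 0x9100),   # dot1qEthertype0x9100
    3: ('dot1q', 0x9200),   # dot1qEthertype0x9200
    4: ('dot1q', 0x88A8),   # dot1qEthertype0x88A8
    5: ('dot1ad', 0x88A8),  # dot1adEthertype0x88A8
    6: ('dot1ah', 0x88A8),  # dot1ahEthertype0x88A8
}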
cevcSIMatchEncapPrimaryCos = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 5), CiscoCosList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapPrimaryCos.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapPrimaryCos.setDescription("This object specifies the CoS values which the Service Instance uses as service match criteria. This object is valid only when 'primaryVlans' and 'primaryCos' bits are set to '1' in corresponding instance of the object cevcSIMatchEncapValid.")
cevcSIMatchEncapSecondaryCos = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 6), CiscoCosList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapSecondaryCos.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapSecondaryCos.setDescription("This object specifies the CoS values which the Service Instance uses as service match criteria. This object is valid only when 'secondaryVlans' and 'secondaryCos' bits are set to '1' in corresponding instance of the object cevcSIMatchEncapValid.")
cevcSIMatchEncapPayloadType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("payloadType0x0800ip", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapPayloadType.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIMatchEncapPayloadType.setDescription("This object specifies the PayloadType(etype/protocol type) values that the service instance uses as a service match criteria. This object is required when the forwarding of layer-2 ethernet packet is done through the payloadType i.e IP etc. 'other' None of the following. 'payloadType0x0800ip' Payload type value for IP is 0x0800. This object is valid only when 'payloadType' bit is set to '1' in corresponding instance of the object cevcSIMatchEncapValid. This object is deprecated by cevcSIMatchEncapPayloadTypes.")
cevcSIMatchEncapPayloadTypes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 8), Bits().clone(namedValues=NamedValues(("payloadTypeIPv4", 0), ("payloadTypeIPv6", 1), ("payloadTypePPPoEDiscovery", 2), ("payloadTypePPPoESession", 3), ("payloadTypePPPoEAll", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapPayloadTypes.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapPayloadTypes.setDescription("This object specifies the etype/protocol type values that service instance uses as a service match criteria. This object is required when the forwarding of layer-2 ethernet packet is done through the payload ether type i.e IP etc. 'payloadTypeIPv4' Ethernet payload type value for IPv4 protocol. 'payloadTypeIPv6' Ethernet payload type value for IPv6 protocol. 'payloadTypePPPoEDiscovery' Ethernet payload type value for PPPoE discovery stage. 'payloadTypePPPoESession' Ethernet payload type value for PPPoE session stage. 'payloadTypePPPoEAll' All ethernet payload type values for PPPoE protocol. This object is valid only when 'payloadTypes' bit is set to '1' in corresponding instance of the object cevcSIMatchEncapValid. This object deprecates cevcSIMatchEncapPayloadType.")
cevcSIMatchEncapPriorityCos = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 9), CiscoCosList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapPriorityCos.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapPriorityCos.setDescription("This object specifies the priority CoS values which the service instance uses as service match criteria. This object is valid only when 'priorityCos' bit is set to '1' in corresponding instance of the object cevcSIMatchEncapValid.")
cevcSIPrimaryVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8), )
if mibBuilder.loadTexts: cevcSIPrimaryVlanTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanTable.setDescription('This table contains the primary VLAN ID list for each Service Instance. This table has an expansion dependent relationship on the cevcSIMatchEncapTable, containing zero or more rows for each encapsulation match criteria.')
cevcSIPrimaryVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIMatchCriteriaIndex"), (0, "CISCO-EVC-MIB", "cevcSIPrimaryVlanBeginningVlan"))
if mibBuilder.loadTexts: cevcSIPrimaryVlanEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanEntry.setDescription("This entry specifies a single VLAN or a range of VLANs contained in the primary VLAN list that's part of the encapsulation match criteria. Entries in this table may be created and deleted via the cevcSIPrimaryVlanRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of the cevcSIPrimaryVlanRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of the cevcSIPrimaryVlanRowStatus column to 'destroy'.")
cevcSIPrimaryVlanBeginningVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1, 1), VlanId())
if mibBuilder.loadTexts: cevcSIPrimaryVlanBeginningVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanBeginningVlan.setDescription("If cevcSIPrimaryVlanEndingVlan is '0', then this object indicates a single VLAN in the list. If cevcSIPrimaryVlanEndingVlan is not '0', then this object indicates the first VLAN in a range of VLANs.")
cevcSIPrimaryVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIPrimaryVlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIPrimaryVlanTable. This column cannot be set to 'active' until all objects have been assigned valid values. Writable objects in this table can be modified while the value of the cevcSIPrimaryVlanRowStatus column is 'active'.")
cevcSIPrimaryVlanStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIPrimaryVlanStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSIPrimaryVlanEndingVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1, 4), VlanIdOrNone()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIPrimaryVlanEndingVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanEndingVlan.setDescription("This object indicates the last VLAN in a range of VLANs. If the row does not describe a range, then the value of this column must be '0'.")
cevcSISecondaryVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9), )
if mibBuilder.loadTexts: cevcSISecondaryVlanTable.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanTable.setDescription('This table contains the secondary VLAN ID list for each service instance. This table has an expansion dependent relationship on the cevcSIMatchEncapTable, containing zero or more rows for each encapsulation match criteria.')
cevcSISecondaryVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIMatchCriteriaIndex"), (0, "CISCO-EVC-MIB", "cevcSISecondaryVlanBeginningVlan"))
if mibBuilder.loadTexts: cevcSISecondaryVlanEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanEntry.setDescription("This entry specifies a single VLAN or a range of VLANs contained in the secondary VLAN list that's part of the encapsulation match criteria. Entries in this table may be created and deleted via the cevcSISecondaryVlanRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of the cevcSISecondaryVlanRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of the cevcSISecondaryVlanRowStatus column to 'destroy'.")
cevcSISecondaryVlanBeginningVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1, 1), VlanId())
if mibBuilder.loadTexts: cevcSISecondaryVlanBeginningVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanBeginningVlan.setDescription("If cevcSISecondaryVlanEndingVlan is '0', then this object indicates a single VLAN in the list. If cevcSISecondaryVlanEndingVlan is not '0', then this object indicates the first VLAN in a range of VLANs.")
cevcSISecondaryVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSISecondaryVlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSISecondaryVlanTable. This column can not be set to 'active' until all objects have been assigned valid values. Writable objects in this table can be modified while the value of cevcSISecondaryVlanRowStatus column is 'active'.")
cevcSISecondaryVlanStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSISecondaryVlanStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSISecondaryVlanEndingVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1, 4), VlanIdOrNone()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSISecondaryVlanEndingVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanEndingVlan.setDescription("This object indicates the last VLAN in a range of VLANs. If the row does not describe a range, then the value of this column must be '0'.")
cevcSIForwardBdTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10), )
if mibBuilder.loadTexts: cevcSIForwardBdTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdTable.setDescription("This table contains the forwarding bridge domain information for each service instance. This table has a sparse dependent relationship on the cevcSITable, containing a row for each service instance having a cevcSIForwardingType of 'bridgeDomain'.")
cevcSIForwardBdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"))
if mibBuilder.loadTexts: cevcSIForwardBdEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdEntry.setDescription("This entry represents a bridge domain used to forward service frames by the service instance. Entries in this table may be created and deleted via the cevcSIForwardBdRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of the cevcSIForwardBdRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of the cevcSIForwardBdRowStatus column to 'destroy'.")
cevcSIForwardBdRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIForwardBdTable. This column can not be set to 'active' until all objects have been assigned valid values. Writable objects in this table can be modified while the value of the cevcSIForwardBdRowStatus column is 'active'.")
cevcSIForwardBdStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 2), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSIForwardBdNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 3), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIForwardBdNumber.setDescription('The bridge domain identifier that is associated with the service instance. A bridge domain refers to a layer 2 broadcast domain spanning a set of physical or virtual ports. Frames are switched within the bridge domain; multicast and unknown destination unicast frames are flooded within the confines of the bridge domain.')
cevcSIForwardBdNumberBase = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 4096, 8192, 12288, 16384))).clone(namedValues=NamedValues(("bdNumBase0", 0), ("bdNumBase4096", 4096), ("bdNumBase8192", 8192), ("bdNumBase12288", 12288), ("bdNumBase16384", 16384)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumberBase.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumberBase.setDescription('This object specifies the base of the bridge domain. The bridge domain range is 1~16k; cevcSIForwardBdNumberBase tracks the base of each 4k bitmap. In this way all 16k bridge domains can be specified using four 1k bitmaps together with a base value identifying which 4k block the bitmaps describe. Of the four 1k bitmaps, cevcSIForwardBdNumber1kBitmap represents 0~1023, cevcSIForwardBdNumber2kBitmap represents 1024~2047, cevcSIForwardBdNumber3kBitmap represents 2048~3071, and cevcSIForwardBdNumber4kBitmap represents 3072~4095, and cevcSIForwardBdNumberBase is one of 0, 4096, 8192, 12288, 16384. An SNMP administrator can use cevcSIForwardBdNumberBase + (position of the set bit in the four 1k bitmaps) to get the BD number of a service instance.')
cevcSIForwardBdNumber1kBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber1kBitmap.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumber1kBitmap.setDescription("This object specifies a string of octets containing one bit per Bridge domain per service instance (generally we have one bridge domain per non-trunk service instance but can have more than one bridge configured with a trunk service instance). The first octet corresponds to Bridge domains with Bridge domain ID values of 0 through 7; the second octet to Bridge domains 8 through 15; etc. Thus, this 128-octet bitmap represents bridge domain ID value 0~1023. For each Bridge domain configured, the bit corresponding to that bridge domain is set to '1'. SNMP Administrator uses cevcSIForwardBdNumberBase + (position of the set bit in bitmap) to calculate BD number of a service instance. Note that if the length of this string is less than 128 octets, any 'missing' octets are assumed to contain the value zero. An NMS may omit any zero-valued octets from the end of this string in order to reduce SetPDU size, and the agent may also omit zero-valued trailing octets, to reduce the size of GetResponse PDUs.")
cevcSIForwardBdNumber2kBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber2kBitmap.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumber2kBitmap.setDescription("This object specifies a string of octets containing one bit per Bridge domain per service instance (generally we have one bridge domain per non-trunk service instance but can have more than one bridge configured with a trunk service instance). The first octet corresponds to Bridge domains with Bridge domain ID values of 1024 through 1031; the second octet to Bridge domains 1032 through 1039; etc. Thus, this 128-octet bitmap represents bridge domain ID value 1024~2047. For each Bridge domain configured, the bit corresponding to that bridge domain is set to 1. SNMP Administrator uses cevcSIForwardBdNumberBase + (position of the set bit in bitmap) to calculate BD number of a service instance. Note that if the length of this string is less than 128 octets, any 'missing' octets are assumed to contain the value zero. An NMS may omit any zero-valued octets from the end of this string in order to reduce SetPDU size, and the agent may also omit zero-valued trailing octets, to reduce the size of GetResponse PDUs.")
cevcSIForwardBdNumber3kBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber3kBitmap.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumber3kBitmap.setDescription("This object specifies a string of octets containing one bit per Bridge domain per service instance (generally we have one bridge domain per non-trunk service instance but can have more than one bridge configured with a trunk service instance). The first octet corresponds to Bridge domains with Bridge domain ID values of 2048 through 2055; the second octet to Bridge domains 2056 through 2063; etc. Thus, this 128-octet bitmap represents bridge domain ID value 2048~3071. For each Bridge domain configured, the bit corresponding to that bridge domain is set to 1. SNMP Administrator uses cevcSIForwardBdNumberBase + (position of the set bit in bitmap) to calculate BD number of a service instance. Note that if the length of this string is less than 128 octets, any 'missing' octets are assumed to contain the value zero. An NMS may omit any zero-valued octets from the end of this string in order to reduce SetPDU size, and the agent may also omit zero-valued trailing octets, to reduce the size of GetResponse PDUs.")
cevcSIForwardBdNumber4kBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber4kBitmap.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumber4kBitmap.setDescription("This object specifies a string of octets containing one bit per Bridge domain per service instance (generally we have one bridge domain per non-trunk service instance but can have more than one bridge configured with a trunk service instance). The first octet corresponds to Bridge domains with Bridge domain ID values of 3072 through 3079; the second octet to Bridge domains 3080 through 3087; etc. Thus, this 128-octet bitmap represents bridge domain ID value 3072~4095. For each Bridge domain configured, the bit corresponding to that bridge domain is set to 1. SNMP Administrator uses cevcSIForwardBdNumberBase + (position of the set bit in bitmap) to calculate BD number of a service instance. Note that if the length of this string is less than 128 octets, any 'missing' octets are assumed to contain the value zero. An NMS may omit any zero-valued octets from the end of this string in order to reduce SetPDU size, and the agent may also omit zero-valued trailing octets, to reduce the size of GetResponse PDUs.")
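# A hedged decoding sketch (editor-added, not pysmi output) for recovering bridge
# domain numbers from cevcSIForwardBdNumberBase plus the four 1k bitmaps described
# above.  Assumption: within each octet the most significant bit corresponds to the
# lowest bridge domain ID covered by that octet, the usual SNMP octet-string bitmap
# convention; the descriptions above do not spell out the in-octet bit order.
def _decodeForwardBds(base, bitmaps):
    """base: cevcSIForwardBdNumberBase value; bitmaps: the four bitmap values
    (1k, 2k, 3k, 4k) as bytes objects, each possibly shorter than 128 octets."""
    bds = []
    for block, octets in enumerate(bitmaps):
        for octetPos, octet in enumerate(bytearray(octets)):
            for bit in range(8):
                if octet & (0x80 >> bit):
                    # BD number = base + offset of this bit within the 4k block
                    bds.append(base + block * 1024 + octetPos * 8 + bit)
    return bds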
cevcEvcNotifyEnabled = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 5, 1), Bits().clone(namedValues=NamedValues(("status", 0), ("creation", 1), ("deletion", 2), ("macSecurityViolation", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cevcEvcNotifyEnabled.setStatus('current')
if mibBuilder.loadTexts: cevcEvcNotifyEnabled.setDescription("This object specifies the system generation of notification, including: 'status' This bit set to '1' specifies the system generation of cevcEvcStatusChangedNotification. 'creation' This bit set to '1' specifies the system generation of cevcEvcCreationNotification. 'deletion' This bit set to '1' specifies the system generation of cevcEvcDeletionNotification. 'macSecurityViolation' This bit set to '1' specifies the system generation of cevcMacSecurityViolationNotification.")
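# An illustrative helper (an assumption-labelled editor sketch, not part of the
# MIB) that builds the single-octet BITS value for cevcEvcNotifyEnabled from the
# named bits above (status=0, creation=1, deletion=2, macSecurityViolation=3).
# It assumes the usual SMI BITS rule that bit 0 is the most significant bit of
# the first octet.
_EVC_NOTIFY_BITS = {'status': 0, 'creation': 1, 'deletion': 2, 'macSecurityViolation': 3}

def _evcNotifyEnabledOctets(*names):
    value = 0
    for name in names:
        value |= 0x80 >> _EVC_NOTIFY_BITS[name]
    return bytes([value])

# e.g. _evcNotifyEnabledOctets('status', 'macSecurityViolation') -> b'\x90'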
ciscoEvcNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0))
cevcEvcStatusChangedNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0, 1)).setObjects(("CISCO-EVC-MIB", "cevcEvcOperStatus"), ("CISCO-EVC-MIB", "cevcEvcCfgUnis"), ("CISCO-EVC-MIB", "cevcEvcActiveUnis"))
if mibBuilder.loadTexts: cevcEvcStatusChangedNotification.setStatus('current')
if mibBuilder.loadTexts: cevcEvcStatusChangedNotification.setDescription("A device generates this notification when an EVC's operational status changes, or the number of active UNIs associated with the EVC (cevcNumActiveUnis) changes.")
cevcEvcCreationNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0, 2)).setObjects(("CISCO-EVC-MIB", "cevcEvcOperStatus"))
if mibBuilder.loadTexts: cevcEvcCreationNotification.setStatus('current')
if mibBuilder.loadTexts: cevcEvcCreationNotification.setDescription('A device generates this notification upon the creation of an EVC.')
cevcEvcDeletionNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0, 3)).setObjects(("CISCO-EVC-MIB", "cevcEvcOperStatus"))
if mibBuilder.loadTexts: cevcEvcDeletionNotification.setStatus('current')
if mibBuilder.loadTexts: cevcEvcDeletionNotification.setDescription('A device generates this notification upon the deletion of an EVC.')
cevcMacSecurityViolationNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0, 4)).setObjects(("IF-MIB", "ifIndex"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumberBase"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber1kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber2kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber3kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber4kBitmap"), ("CISCO-EVC-MIB", "cevcSIID"), ("CISCO-EVC-MIB", "cevcMacAddress"), ("CISCO-EVC-MIB", "cevcMaxMacConfigLimit"), ("CISCO-EVC-MIB", "cevcViolationCause"))
if mibBuilder.loadTexts: cevcMacSecurityViolationNotification.setStatus('current')
if mibBuilder.loadTexts: cevcMacSecurityViolationNotification.setDescription("A SNMP entity generates this notification in the following cases: When the system MAC Address limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedSystemLimit' value. When the Bridge domain limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedBdLimit' value. When the Service Instance limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedSILimit' value. If the MAC address is present in the Black list then cevcMacSecurityViolationCauseType will contain 'blackListDeny' value. Description of all the varbinds for this Notification is as follows: ifIndex indicates the interface index which identifies the interface that the service instance is attached. cevcSIForwardBdNumberBase indicates the base of bridge domain. The bridge domain range is 1~16k, this object is to track the base of each 4k bitmap. cevcSIForwardBdNumber1kBitmap indicates a string of octets containing one bit per Bridge domain per service instance. This 128-octet bitmap represents bridge domain ID values 0~1023. cevcSIForwardBdNumber2kBitmap indicates a string of octets containing one bit per Bridge domain per service instance. This 128-octet bitmap represents bridge domain ID values 1024~2047. cevcSIForwardBdNumber3kBitmap indicates a string of octets containing one bit per Bridge domain per service instance. This 128-octet bitmap represents bridge domain ID values 2048~3071. cevcSIForwardBdNumber4kBitmap indicates a string of octets containing one bit per Bridge domain per service instance. This 128-octet bitmap represents bridge domain ID values 3072~4095. cevcSIID indicates the service instance ID for the Mac security violation notification. cevcMacAddress indicates the Mac address which has violated the Mac security rules. cevcMaxMacConfigLimit indicates the maximum Mac configuration limit. This is also sent as a part of Mac security violation notification. cevcViolationCause indicates the Mac security violation cause.")
ciscoEvcMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 1))
ciscoEvcMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2))
ciscoEvcMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 1, 1)).setObjects(("CISCO-EVC-MIB", "cevcSystemGroup"), ("CISCO-EVC-MIB", "cevcPortGroup"), ("CISCO-EVC-MIB", "cevcEvcGroup"), ("CISCO-EVC-MIB", "cevcSIGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationConfigGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationGroup"), ("CISCO-EVC-MIB", "cevcSICosMatchCriteriaGroup"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteGroup"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaGroup"), ("CISCO-EVC-MIB", "cevcSIForwardGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEvcMIBCompliance = ciscoEvcMIBCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoEvcMIBCompliance.setDescription('The new compliance statement for entities which implement the CISCO-EVC-MIB.')
ciscoEvcMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 1, 2)).setObjects(("CISCO-EVC-MIB", "cevcSystemGroup"), ("CISCO-EVC-MIB", "cevcPortGroup"), ("CISCO-EVC-MIB", "cevcEvcGroup"), ("CISCO-EVC-MIB", "cevcSIGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationConfigGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationGroup"), ("CISCO-EVC-MIB", "cevcSICosMatchCriteriaGroup"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteGroup"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaGroupRev1"), ("CISCO-EVC-MIB", "cevcSIForwardGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEvcMIBComplianceRev1 = ciscoEvcMIBComplianceRev1.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoEvcMIBComplianceRev1.setDescription('The compliance statement for entities which implement the CISCO-EVC-MIB. This compliance module deprecates ciscoEvcMIBCompliance.')
ciscoEvcMIBComplianceRev2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 1, 3)).setObjects(("CISCO-EVC-MIB", "cevcSystemGroup"), ("CISCO-EVC-MIB", "cevcPortGroup"), ("CISCO-EVC-MIB", "cevcEvcGroup"), ("CISCO-EVC-MIB", "cevcSIGroupRev1"), ("CISCO-EVC-MIB", "cevcEvcNotificationConfigGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationGroupRev1"), ("CISCO-EVC-MIB", "cevcSICosMatchCriteriaGroup"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteGroup"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaGroupRev1"), ("CISCO-EVC-MIB", "cevcSIForwardGroupRev1"), ("CISCO-EVC-MIB", "cevcMacSecurityViolationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEvcMIBComplianceRev2 = ciscoEvcMIBComplianceRev2.setStatus('current')
if mibBuilder.loadTexts: ciscoEvcMIBComplianceRev2.setDescription('The compliance statement for entities which implement the CISCO-EVC-MIB. This compliance module deprecates ciscoEvcMIBComplianceRev1.')
cevcSystemGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 1)).setObjects(("CISCO-EVC-MIB", "cevcMaxNumEvcs"), ("CISCO-EVC-MIB", "cevcNumCfgEvcs"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSystemGroup = cevcSystemGroup.setStatus('current')
if mibBuilder.loadTexts: cevcSystemGroup.setDescription('A collection of objects providing system configuration of EVCs.')
cevcPortGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 2)).setObjects(("CISCO-EVC-MIB", "cevcPortMode"), ("CISCO-EVC-MIB", "cevcPortMaxNumEVCs"), ("CISCO-EVC-MIB", "cevcPortMaxNumServiceInstances"), ("CISCO-EVC-MIB", "cevcPortL2ControlProtocolAction"), ("CISCO-EVC-MIB", "cevcUniIdentifier"), ("CISCO-EVC-MIB", "cevcUniPortType"), ("CISCO-EVC-MIB", "cevcUniServiceAttributes"), ("CISCO-EVC-MIB", "cevcUniCEVlanEvcEndingVlan"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcPortGroup = cevcPortGroup.setStatus('current')
if mibBuilder.loadTexts: cevcPortGroup.setDescription('A collection of objects providing configuration for ports in an EVC.')
cevcEvcGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 3)).setObjects(("CISCO-EVC-MIB", "cevcEvcIdentifier"), ("CISCO-EVC-MIB", "cevcEvcType"), ("CISCO-EVC-MIB", "cevcEvcOperStatus"), ("CISCO-EVC-MIB", "cevcEvcCfgUnis"), ("CISCO-EVC-MIB", "cevcEvcActiveUnis"), ("CISCO-EVC-MIB", "cevcEvcStorageType"), ("CISCO-EVC-MIB", "cevcEvcRowStatus"), ("CISCO-EVC-MIB", "cevcEvcUniId"), ("CISCO-EVC-MIB", "cevcEvcUniOperStatus"), ("CISCO-EVC-MIB", "cevcEvcLocalUniIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcEvcGroup = cevcEvcGroup.setStatus('current')
if mibBuilder.loadTexts: cevcEvcGroup.setDescription('A collection of objects providing configuration and status information for EVCs.')
cevcSIGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 4)).setObjects(("CISCO-EVC-MIB", "cevcSIName"), ("CISCO-EVC-MIB", "cevcSITargetType"), ("CISCO-EVC-MIB", "cevcSITarget"), ("CISCO-EVC-MIB", "cevcSIEvcIndex"), ("CISCO-EVC-MIB", "cevcSIRowStatus"), ("CISCO-EVC-MIB", "cevcSIStorageType"), ("CISCO-EVC-MIB", "cevcSIAdminStatus"), ("CISCO-EVC-MIB", "cevcSIOperStatus"), ("CISCO-EVC-MIB", "cevcSIL2ControlProtocolAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteEncapsulation"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan1"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan2"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteSymmetric"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteStorageType"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteRowStatus"), ("CISCO-EVC-MIB", "cevcSIForwardingType"), ("CISCO-EVC-MIB", "cevcSICEVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSICEVlanStorageType"), ("CISCO-EVC-MIB", "cevcSICEVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSIMatchStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchEncapStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapValid"), ("CISCO-EVC-MIB", "cevcSIMatchEncapEncapsulation"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanEndingVlan"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSIGroup = cevcSIGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIGroup.setDescription('A collection of objects providing configuration and match criteria for service instances. cevcSIGroup object is superseded by cevcSIGroupRev1.')
cevcSIVlanRewriteGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 5)).setObjects(("CISCO-EVC-MIB", "cevcSIVlanRewriteAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteEncapsulation"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan1"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan2"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteSymmetric"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteStorageType"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSIVlanRewriteGroup = cevcSIVlanRewriteGroup.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteGroup.setDescription('A collection of objects which provides VLAN rewrite information for a service instance.')
cevcSICosMatchCriteriaGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 6)).setObjects(("CISCO-EVC-MIB", "cevcSIMatchEncapPrimaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapSecondaryCos"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSICosMatchCriteriaGroup = cevcSICosMatchCriteriaGroup.setStatus('current')
if mibBuilder.loadTexts: cevcSICosMatchCriteriaGroup.setDescription('A collection of objects which provides CoS match criteria for a service instance.')
cevcSIMatchCriteriaGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 7)).setObjects(("CISCO-EVC-MIB", "cevcSIMatchRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchEncapStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapValid"), ("CISCO-EVC-MIB", "cevcSIMatchEncapEncapsulation"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPrimaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapSecondaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPayloadType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanEndingVlan"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSIMatchCriteriaGroup = cevcSIMatchCriteriaGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIMatchCriteriaGroup.setDescription('A collection of objects providing match criteria information for service instances. cevcSIMatchCriteriaGroup object is superseded by cevcSIMatchCriteriaGroupRev1.')
cevcSIForwardGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 8)).setObjects(("CISCO-EVC-MIB", "cevcSIForwardingType"), ("CISCO-EVC-MIB", "cevcSIForwardBdRowStatus"), ("CISCO-EVC-MIB", "cevcSIForwardBdStorageType"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSIForwardGroup = cevcSIForwardGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIForwardGroup.setDescription('A collection of objects providing service frame forwarding information for service instances. cevcSIForwardGroup object is superseded by cevcSIForwardGroupRev1.')
cevcEvcNotificationConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 9)).setObjects(("CISCO-EVC-MIB", "cevcEvcNotifyEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcEvcNotificationConfigGroup = cevcEvcNotificationConfigGroup.setStatus('current')
if mibBuilder.loadTexts: cevcEvcNotificationConfigGroup.setDescription('A collection of objects for configuring notification of this MIB.')
cevcEvcNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 10)).setObjects(("CISCO-EVC-MIB", "cevcEvcStatusChangedNotification"), ("CISCO-EVC-MIB", "cevcEvcCreationNotification"), ("CISCO-EVC-MIB", "cevcEvcDeletionNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcEvcNotificationGroup = cevcEvcNotificationGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cevcEvcNotificationGroup.setDescription('A collection of notifications that this MIB module is required to implement. cevcEvcNotificationGroup object is superseded by cevcEvcNotificationGroupRev1.')
cevcSIMatchCriteriaGroupRev1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 11)).setObjects(("CISCO-EVC-MIB", "cevcSIMatchRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchEncapStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapValid"), ("CISCO-EVC-MIB", "cevcSIMatchEncapEncapsulation"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPrimaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapSecondaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPayloadTypes"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPriorityCos"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanEndingVlan"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSIMatchCriteriaGroupRev1 = cevcSIMatchCriteriaGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaGroupRev1.setDescription('A collection of objects providing match criteria information for service instances. This group deprecates the old group cevcSIMatchCriteriaGroup.')
cevcEvcNotificationGroupRev1 = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 12)).setObjects(("CISCO-EVC-MIB", "cevcEvcStatusChangedNotification"), ("CISCO-EVC-MIB", "cevcEvcCreationNotification"), ("CISCO-EVC-MIB", "cevcEvcDeletionNotification"), ("CISCO-EVC-MIB", "cevcMacSecurityViolationNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcEvcNotificationGroupRev1 = cevcEvcNotificationGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cevcEvcNotificationGroupRev1.setDescription('A collection of notifications that this MIB module is required to implement. This module deprecates the cevcEvcNotificationGroup.')
cevcSIGroupRev1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 13)).setObjects(("CISCO-EVC-MIB", "cevcSIName"), ("CISCO-EVC-MIB", "cevcSITargetType"), ("CISCO-EVC-MIB", "cevcSITarget"), ("CISCO-EVC-MIB", "cevcSIEvcIndex"), ("CISCO-EVC-MIB", "cevcSIRowStatus"), ("CISCO-EVC-MIB", "cevcSIStorageType"), ("CISCO-EVC-MIB", "cevcSIAdminStatus"), ("CISCO-EVC-MIB", "cevcSIOperStatus"), ("CISCO-EVC-MIB", "cevcPortL2ControlProtocolAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteEncapsulation"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan1"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan2"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteSymmetric"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteRowStatus"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteStorageType"), ("CISCO-EVC-MIB", "cevcSIForwardingType"), ("CISCO-EVC-MIB", "cevcSICEVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSICEVlanStorageType"), ("CISCO-EVC-MIB", "cevcSICEVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSIMatchStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchEncapStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapValid"), ("CISCO-EVC-MIB", "cevcSIMatchEncapEncapsulation"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSIMatchRowStatus"), ("CISCO-EVC-MIB", "cevcSICreationType"), ("CISCO-EVC-MIB", "cevcSIType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSIGroupRev1 = cevcSIGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cevcSIGroupRev1.setDescription('A collection of objects providing configuration and match criteria for service instances. This module deprecates the cevcSIGroup')
cevcSIForwardGroupRev1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 14)).setObjects(("CISCO-EVC-MIB", "cevcSIForwardingType"), ("CISCO-EVC-MIB", "cevcSIForwardBdRowStatus"), ("CISCO-EVC-MIB", "cevcSIForwardBdStorageType"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumberBase"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber1kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber2kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber3kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber4kBitmap"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSIForwardGroupRev1 = cevcSIForwardGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardGroupRev1.setDescription('A collection of objects providing service frame forwarding information for service instances. This module deprecates cevcSIForwardGroup')
cevcMacSecurityViolationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 15)).setObjects(("CISCO-EVC-MIB", "cevcMacAddress"), ("CISCO-EVC-MIB", "cevcMaxMacConfigLimit"), ("CISCO-EVC-MIB", "cevcSIID"), ("CISCO-EVC-MIB", "cevcViolationCause"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcMacSecurityViolationGroup = cevcMacSecurityViolationGroup.setStatus('current')
if mibBuilder.loadTexts: cevcMacSecurityViolationGroup.setDescription('A collection of objects providing the maximum configured MAC limit, the MAC address, service instance ID and Violation cause for Mac Security Violation Information.')
mibBuilder.exportSymbols("CISCO-EVC-MIB", cevcEvcStorageType=cevcEvcStorageType, cevcSIType=cevcSIType, cevcEvc=cevcEvc, cevcEvcUniOperStatus=cevcEvcUniOperStatus, ciscoEvcMIBCompliance=ciscoEvcMIBCompliance, cevcEvcUniTable=cevcEvcUniTable, cevcSystemGroup=cevcSystemGroup, cevcNumCfgEvcs=cevcNumCfgEvcs, cevcSICEVlanRowStatus=cevcSICEVlanRowStatus, cevcSIName=cevcSIName, cevcSICreationType=cevcSICreationType, cevcSIForwardBdNumberBase=cevcSIForwardBdNumberBase, cevcSIMatchEncapPayloadTypes=cevcSIMatchEncapPayloadTypes, cevcPortTable=cevcPortTable, ServiceInstanceTarget=ServiceInstanceTarget, cevcSIMatchEncapEntry=cevcSIMatchEncapEntry, PYSNMP_MODULE_ID=ciscoEvcMIB, cevcEvcCfgUnis=cevcEvcCfgUnis, cevcSIForwardGroupRev1=cevcSIForwardGroupRev1, cevcSIForwardBdEntry=cevcSIForwardBdEntry, cevcSIEvcIndex=cevcSIEvcIndex, cevcSIForwardBdNumber3kBitmap=cevcSIForwardBdNumber3kBitmap, cevcSIMatchCriteriaIndex=cevcSIMatchCriteriaIndex, cevcSIGroup=cevcSIGroup, cevcSIForwardBdTable=cevcSIForwardBdTable, cevcEvcUniId=cevcEvcUniId, cevcEvcStatusChangedNotification=cevcEvcStatusChangedNotification, cevcViolationCause=cevcViolationCause, cevcSIPrimaryVlanEntry=cevcSIPrimaryVlanEntry, cevcEvcGroup=cevcEvcGroup, cevcSIIndex=cevcSIIndex, cevcSIVlanRewriteVlan1=cevcSIVlanRewriteVlan1, cevcSITable=cevcSITable, cevcSISecondaryVlanStorageType=cevcSISecondaryVlanStorageType, cevcUniPortType=cevcUniPortType, cevcEvcStateTable=cevcEvcStateTable, cevcSIVlanRewriteSymmetric=cevcSIVlanRewriteSymmetric, cevcSIMatchEncapRowStatus=cevcSIMatchEncapRowStatus, cevcMacSecurityViolationNotification=cevcMacSecurityViolationNotification, cevcEvcNotificationGroupRev1=cevcEvcNotificationGroupRev1, ciscoEvcMIB=ciscoEvcMIB, ciscoEvcMIBComplianceRev1=ciscoEvcMIBComplianceRev1, cevcUniTable=cevcUniTable, cevcUniCEVlanEvcTable=cevcUniCEVlanEvcTable, cevcEvcOperStatus=cevcEvcOperStatus, ciscoEvcMIBNotifications=ciscoEvcMIBNotifications, ciscoEvcMIBComplianceRev2=ciscoEvcMIBComplianceRev2, cevcSIVlanRewriteEntry=cevcSIVlanRewriteEntry, cevcUniCEVlanEvcBeginningVlan=cevcUniCEVlanEvcBeginningVlan, cevcSIPrimaryVlanEndingVlan=cevcSIPrimaryVlanEndingVlan, cevcSIMatchEncapPayloadType=cevcSIMatchEncapPayloadType, cevcSISecondaryVlanRowStatus=cevcSISecondaryVlanRowStatus, cevcEvcStateEntry=cevcEvcStateEntry, cevcPortGroup=cevcPortGroup, cevcSIPrimaryVlanStorageType=cevcSIPrimaryVlanStorageType, cevcSIMatchCriteriaType=cevcSIMatchCriteriaType, cevcSICEVlanTable=cevcSICEVlanTable, cevcSITarget=cevcSITarget, cevcSIAdminStatus=cevcSIAdminStatus, cevcSIL2ControlProtocolType=cevcSIL2ControlProtocolType, ciscoEvcNotificationPrefix=ciscoEvcNotificationPrefix, CiscoEvcIndexOrZero=CiscoEvcIndexOrZero, cevcEvcIdentifier=cevcEvcIdentifier, cevcSIStateEntry=cevcSIStateEntry, cevcSIVlanRewriteTable=cevcSIVlanRewriteTable, cevcSIMatchCriteriaEntry=cevcSIMatchCriteriaEntry, cevcEvcRowStatus=cevcEvcRowStatus, cevcEvcNotificationGroup=cevcEvcNotificationGroup, cevcSIForwardBdNumber2kBitmap=cevcSIForwardBdNumber2kBitmap, cevcMaxNumEvcs=cevcMaxNumEvcs, cevcSIL2ControlProtocolTable=cevcSIL2ControlProtocolTable, cevcEvcUniIndex=cevcEvcUniIndex, cevcEvcIndex=cevcEvcIndex, cevcServiceInstance=cevcServiceInstance, cevcUniCEVlanEvcEntry=cevcUniCEVlanEvcEntry, cevcSICEVlanEntry=cevcSICEVlanEntry, cevcSIVlanRewriteDirection=cevcSIVlanRewriteDirection, cevcSIID=cevcSIID, cevcSIMatchEncapEncapsulation=cevcSIMatchEncapEncapsulation, ciscoEvcMIBObjects=ciscoEvcMIBObjects, ServiceInstanceTargetType=ServiceInstanceTargetType, cevcPort=cevcPort, 
cevcSIVlanRewriteVlan2=cevcSIVlanRewriteVlan2, cevcSIForwardBdNumber1kBitmap=cevcSIForwardBdNumber1kBitmap, cevcMaxMacConfigLimit=cevcMaxMacConfigLimit, cevcSIMatchEncapSecondaryCos=cevcSIMatchEncapSecondaryCos, cevcPortMaxNumServiceInstances=cevcPortMaxNumServiceInstances, cevcEvcNotifyEnabled=cevcEvcNotifyEnabled, cevcEvcType=cevcEvcType, cevcMacSecurityViolation=cevcMacSecurityViolation, cevcEvcDeletionNotification=cevcEvcDeletionNotification, ciscoEvcMIBGroups=ciscoEvcMIBGroups, cevcSIL2ControlProtocolAction=cevcSIL2ControlProtocolAction, cevcSIVlanRewriteGroup=cevcSIVlanRewriteGroup, cevcUniServiceAttributes=cevcUniServiceAttributes, cevcSIMatchRowStatus=cevcSIMatchRowStatus, cevcSICEVlanStorageType=cevcSICEVlanStorageType, cevcSICEVlanBeginningVlan=cevcSICEVlanBeginningVlan, cevcSIMatchEncapStorageType=cevcSIMatchEncapStorageType, cevcSIL2ControlProtocolEntry=cevcSIL2ControlProtocolEntry, cevcSIMatchCriteriaTable=cevcSIMatchCriteriaTable, cevcEvcActiveUnis=cevcEvcActiveUnis, cevcSIVlanRewriteAction=cevcSIVlanRewriteAction, ciscoEvcMIBCompliances=ciscoEvcMIBCompliances, cevcSICEVlanEndingVlan=cevcSICEVlanEndingVlan, cevcSIPrimaryVlanTable=cevcSIPrimaryVlanTable, cevcSIVlanRewriteEncapsulation=cevcSIVlanRewriteEncapsulation, cevcSIForwardingType=cevcSIForwardingType, cevcSISecondaryVlanBeginningVlan=cevcSISecondaryVlanBeginningVlan, cevcSystem=cevcSystem, ciscoEvcMIBConformance=ciscoEvcMIBConformance, cevcMacSecurityViolationGroup=cevcMacSecurityViolationGroup, cevcSIMatchEncapPriorityCos=cevcSIMatchEncapPriorityCos, cevcSIOperStatus=cevcSIOperStatus, CiscoEvcIndex=CiscoEvcIndex, cevcSIMatchCriteriaGroup=cevcSIMatchCriteriaGroup, cevcSITargetType=cevcSITargetType, cevcPortL2ControlProtocolTable=cevcPortL2ControlProtocolTable, cevcUniIdentifier=cevcUniIdentifier, cevcSISecondaryVlanTable=cevcSISecondaryVlanTable, cevcSIStorageType=cevcSIStorageType, CevcL2ControlProtocolType=CevcL2ControlProtocolType, cevcSIMatchCriteriaGroupRev1=cevcSIMatchCriteriaGroupRev1, cevcSICosMatchCriteriaGroup=cevcSICosMatchCriteriaGroup, cevcSIForwardGroup=cevcSIForwardGroup, cevcEvcUniEntry=cevcEvcUniEntry, cevcEvcNotificationConfigGroup=cevcEvcNotificationConfigGroup, cevcPortL2ControlProtocolAction=cevcPortL2ControlProtocolAction, CevcMacSecurityViolationCauseType=CevcMacSecurityViolationCauseType, cevcSIRowStatus=cevcSIRowStatus, cevcEvcEntry=cevcEvcEntry, cevcEvcCreationNotification=cevcEvcCreationNotification, cevcEvcLocalUniIfIndex=cevcEvcLocalUniIfIndex, cevcUniEntry=cevcUniEntry, cevcSIVlanRewriteRowStatus=cevcSIVlanRewriteRowStatus, cevcPortMaxNumEVCs=cevcPortMaxNumEVCs, cevcPortL2ControlProtocolType=cevcPortL2ControlProtocolType, cevcSISecondaryVlanEntry=cevcSISecondaryVlanEntry, cevcUniCEVlanEvcEndingVlan=cevcUniCEVlanEvcEndingVlan, cevcSIForwardBdRowStatus=cevcSIForwardBdRowStatus, cevcPortMode=cevcPortMode, cevcMacAddress=cevcMacAddress, cevcSIMatchEncapValid=cevcSIMatchEncapValid, cevcUniEvcIndex=cevcUniEvcIndex, cevcPortL2ControlProtocolEntry=cevcPortL2ControlProtocolEntry, cevcSIVlanRewriteStorageType=cevcSIVlanRewriteStorageType, cevcSIStateTable=cevcSIStateTable, cevcSIPrimaryVlanRowStatus=cevcSIPrimaryVlanRowStatus, cevcSIMatchEncapTable=cevcSIMatchEncapTable, cevcSISecondaryVlanEndingVlan=cevcSISecondaryVlanEndingVlan, cevcSIForwardBdNumber4kBitmap=cevcSIForwardBdNumber4kBitmap, cevcPortEntry=cevcPortEntry, cevcSIPrimaryVlanBeginningVlan=cevcSIPrimaryVlanBeginningVlan, ServiceInstanceInterface=ServiceInstanceInterface, cevcSIForwardBdNumber=cevcSIForwardBdNumber, 
cevcSIMatchEncapPrimaryCos=cevcSIMatchEncapPrimaryCos, cevcEvcTable=cevcEvcTable, cevcSIForwardBdStorageType=cevcSIForwardBdStorageType, cevcSIGroupRev1=cevcSIGroupRev1, cevcEvcNotificationConfig=cevcEvcNotificationConfig, cevcSIEntry=cevcSIEntry, cevcSIMatchStorageType=cevcSIMatchStorageType)
| 232.810247 | 7,344 | 0.79058 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/CISCO-EVC-MIB.py | 122,691 | Python |
Desc = cellDescClass("SDFFSRX1")
Desc.properties["cell_footprint"] = "sdffsr"
Desc.properties["area"] = "99.792000"
Desc.properties["cell_leakage_power"] = "2335.442220"
Desc.pinOrder = ['CK', 'D', 'IQ', 'IQN', 'Q', 'QN', 'RN', 'SE', 'SI', 'SN', 'next']
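# SDFFSRX1 appears to be a scan D flip-flop with active-low set (SN) and reset (RN):
# the arcs below record setup/hold checks against the rising CK edge for the data and
# scan inputs, clock-to-Q/QN propagation, and the asynchronous preset/clear paths.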
Desc.add_arc("CK","SI","setup_rising")
Desc.add_arc("CK","SI","hold_rising")
Desc.add_arc("CK","SE","setup_rising")
Desc.add_arc("CK","SE","hold_rising")
Desc.add_arc("CK","D","setup_rising")
Desc.add_arc("CK","D","hold_rising")
Desc.add_arc("CK","SN","setup_rising")
Desc.add_arc("CK","SN","hold_rising")
Desc.add_arc("CK","RN","setup_rising")
Desc.add_arc("CK","RN","hold_rising")
Desc.add_arc("CK","Q","rising_edge")
Desc.add_arc("SN","Q","preset")
Desc.add_arc("RN","Q","clear")
Desc.add_arc("CK","QN","rising_edge")
Desc.add_arc("SN","QN","clear")
Desc.add_arc("RN","QN","preset")
Desc.add_param("area",99.792000);
Desc.add_pin("SI","input")
Desc.add_pin("SE","input")
Desc.add_pin("D","input")
Desc.set_pin_job("CK","clock")
Desc.add_pin("CK","input")
Desc.add_pin("SN","input")
Desc.add_pin("RN","input")
Desc.add_pin("Q","output")
Desc.add_pin_func("Q","unknown")
Desc.add_pin("QN","output")
Desc.add_pin_func("QN","unknown")
Desc.add_pin("IQ","output")
Desc.add_pin_func("IQ","unknown")
Desc.add_pin("IQN","output")
Desc.add_pin_func("IQN","unknown")
Desc.add_pin("next","output")
Desc.add_pin_func("next","unknown")
Desc.set_job("flipflop")
CellLib["SDFFSRX1"]=Desc
| 34.047619 | 83 | 0.67972 | [
"MIT"
] | avielazari/vlsistuff | synlib/descriptions/SDFFSRX1.py | 1,430 | Python |
# -*- coding: utf-8 -*-
from django.db import models
from django.shortcuts import render,redirect
from django.views import View
from django.contrib.auth import authenticate, login , logout as django_logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect
from django.core.paginator import Paginator
import django_filters
from django import forms
from .models import B2BRecord
# Create your views here.
class LendRecordFilter(django_filters.FilterSet):
created = django_filters.DateFromToRangeFilter(
widget=forms.SplitDateTimeWidget(
attrs={
'class':'datepicker',
'type':'date',
}
)
)
class Meta:
model = B2BRecord
fields = ['depot','created']
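# Read-only listing view: applies LendRecordFilter to the GET parameters,
# paginates the filtered queryset 15 records per page, and renders
# lend_record_list.html with the page object and the filter.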
class LendRecordListView(View):
def get(self,request):
ft = LendRecordFilter(request.GET)
merchandises = ft.qs
paginator = Paginator(merchandises, 15)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
return render(request,'lend_record_list.html',{'records':page_obj,'filter':ft}) | 26.214286 | 81 | 0.768392 | [
"Apache-2.0"
] | v1ct0r5u3n/TBS | TBS/b2b/views.py | 1,101 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a self-generating script that contains all of the iso3166-1 data.
To regenerate, a CSV file must be created that contains the latest data. Here's
how to do that:
1. Visit https://www.iso.org/obp
2. Click the "Country Codes" radio option and click the search button
3. Filter by "Officially assigned codes"
4. Change the results per page to 300
5. Copy the html table and paste into Libreoffice Calc / Excel
6. Delete the French name column
7. Save as a CSV file in django_countries/iso3166-1.csv
8. Run this script from the command line
"""
from __future__ import unicode_literals
import glob
import os
try:
from django.utils.translation import ugettext_lazy as _
except ImportError: # pragma: no cover
# Allows this module to be executed without Django installed.
_ = lambda x: x
COMMON_NAMES = {
"BN": _("Brunei"),
"BO": _("Bolivia"),
"GB": _("United Kingdom"),
"IR": _("Iran"),
"KP": _("North Korea"),
"KR": _("South Korea"),
"LA": _("Laos"),
"MD": _("Moldova"),
"MK": _("Macedonia"),
"RU": _("Russia"),
"SY": _("Syria"),
"TW": _("Taiwan"),
"TZ": _("Tanzania"),
"VE": _("Venezuela"),
"VN": _("Vietnam"),
}
# Nicely titled (and translatable) country names.
COUNTRIES = {
"AF": _("Afghanistan"),
"AX": _("Åland Islands"),
"AL": _("Albania"),
"DZ": _("Algeria"),
"AS": _("American Samoa"),
"AD": _("Andorra"),
"AO": _("Angola"),
"AI": _("Anguilla"),
"AQ": _("Antarctica"),
"AG": _("Antigua and Barbuda"),
"AR": _("Argentina"),
"AM": _("Armenia"),
"AW": _("Aruba"),
"AU": _("Australia"),
"AT": _("Austria"),
"AZ": _("Azerbaijan"),
"BS": _("Bahamas"),
"BH": _("Bahrain"),
"BD": _("Bangladesh"),
"BB": _("Barbados"),
"BY": _("Belarus"),
"BE": _("Belgium"),
"BZ": _("Belize"),
"BJ": _("Benin"),
"BM": _("Bermuda"),
"BT": _("Bhutan"),
"BO": _("Bolivia (Plurinational State of)"),
"BQ": _("Bonaire, Sint Eustatius and Saba"),
"BA": _("Bosnia and Herzegovina"),
"BW": _("Botswana"),
"BV": _("Bouvet Island"),
"BR": _("Brazil"),
"IO": _("British Indian Ocean Territory"),
"BN": _("Brunei Darussalam"),
"BG": _("Bulgaria"),
"BF": _("Burkina Faso"),
"BI": _("Burundi"),
"CV": _("Cabo Verde"),
"KH": _("Cambodia"),
"CM": _("Cameroon"),
"CA": _("Canada"),
"KY": _("Cayman Islands"),
"CF": _("Central African Republic"),
"TD": _("Chad"),
"CL": _("Chile"),
"CN": _("China"),
"CX": _("Christmas Island"),
"CC": _("Cocos (Keeling) Islands"),
"CO": _("Colombia"),
"KM": _("Comoros"),
"CD": _("Congo (the Democratic Republic of the)"),
"CG": _("Congo"),
"CK": _("Cook Islands"),
"CR": _("Costa Rica"),
"CI": _("Côte d'Ivoire"),
"HR": _("Croatia"),
"CU": _("Cuba"),
"CW": _("Curaçao"),
"CY": _("Cyprus"),
"CZ": _("Czechia"),
"DK": _("Denmark"),
"DJ": _("Djibouti"),
"DM": _("Dominica"),
"DO": _("Dominican Republic"),
"EC": _("Ecuador"),
"EG": _("Egypt"),
"SV": _("El Salvador"),
"GQ": _("Equatorial Guinea"),
"ER": _("Eritrea"),
"EE": _("Estonia"),
"ET": _("Ethiopia"),
"FK": _("Falkland Islands [Malvinas]"),
"FO": _("Faroe Islands"),
"FJ": _("Fiji"),
"FI": _("Finland"),
"FR": _("France"),
"GF": _("French Guiana"),
"PF": _("French Polynesia"),
"TF": _("French Southern Territories"),
"GA": _("Gabon"),
"GM": _("Gambia"),
"GE": _("Georgia"),
"DE": _("Germany"),
"GH": _("Ghana"),
"GI": _("Gibraltar"),
"GR": _("Greece"),
"GL": _("Greenland"),
"GD": _("Grenada"),
"GP": _("Guadeloupe"),
"GU": _("Guam"),
"GT": _("Guatemala"),
"GG": _("Guernsey"),
"GN": _("Guinea"),
"GW": _("Guinea-Bissau"),
"GY": _("Guyana"),
"HT": _("Haiti"),
"HM": _("Heard Island and McDonald Islands"),
"VA": _("Holy See"),
"HN": _("Honduras"),
"HK": _("Hong Kong"),
"HU": _("Hungary"),
"IS": _("Iceland"),
"IN": _("India"),
"ID": _("Indonesia"),
"IR": _("Iran (Islamic Republic of)"),
"IQ": _("Iraq"),
"IE": _("Ireland"),
"IM": _("Isle of Man"),
"IL": _("Israel"),
"IT": _("Italy"),
"JM": _("Jamaica"),
"JP": _("Japan"),
"JE": _("Jersey"),
"JO": _("Jordan"),
"KZ": _("Kazakhstan"),
"KE": _("Kenya"),
"KI": _("Kiribati"),
"KP": _("Korea (the Democratic People's Republic of)"),
"KR": _("Korea (the Republic of)"),
"KW": _("Kuwait"),
"KG": _("Kyrgyzstan"),
"LA": _("Lao People's Democratic Republic"),
"LV": _("Latvia"),
"LB": _("Lebanon"),
"LS": _("Lesotho"),
"LR": _("Liberia"),
"LY": _("Libya"),
"LI": _("Liechtenstein"),
"LT": _("Lithuania"),
"LU": _("Luxembourg"),
"MO": _("Macao"),
"MK": _("Macedonia (the former Yugoslav Republic of)"),
"MG": _("Madagascar"),
"MW": _("Malawi"),
"MY": _("Malaysia"),
"MV": _("Maldives"),
"ML": _("Mali"),
"MT": _("Malta"),
"MH": _("Marshall Islands"),
"MQ": _("Martinique"),
"MR": _("Mauritania"),
"MU": _("Mauritius"),
"YT": _("Mayotte"),
"MX": _("Mexico"),
"FM": _("Micronesia (Federated States of)"),
"MD": _("Moldova (the Republic of)"),
"MC": _("Monaco"),
"MN": _("Mongolia"),
"ME": _("Montenegro"),
"MS": _("Montserrat"),
"MA": _("Morocco"),
"MZ": _("Mozambique"),
"MM": _("Myanmar"),
"NA": _("Namibia"),
"NR": _("Nauru"),
"NP": _("Nepal"),
"NL": _("Netherlands"),
"NC": _("New Caledonia"),
"NZ": _("New Zealand"),
"NI": _("Nicaragua"),
"NE": _("Niger"),
"NG": _("Nigeria"),
"NU": _("Niue"),
"NF": _("Norfolk Island"),
"MP": _("Northern Mariana Islands"),
"NO": _("Norway"),
"OM": _("Oman"),
"PK": _("Pakistan"),
"PW": _("Palau"),
"PS": _("Palestine, State of"),
"PA": _("Panama"),
"PG": _("Papua New Guinea"),
"PY": _("Paraguay"),
"PE": _("Peru"),
"PH": _("Philippines"),
"PN": _("Pitcairn"),
"PL": _("Poland"),
"PT": _("Portugal"),
"PR": _("Puerto Rico"),
"QA": _("Qatar"),
"RE": _("Réunion"),
"RO": _("Romania"),
"RU": _("Russian Federation"),
"RW": _("Rwanda"),
"BL": _("Saint Barthélemy"),
"SH": _("Saint Helena, Ascension and Tristan da Cunha"),
"KN": _("Saint Kitts and Nevis"),
"LC": _("Saint Lucia"),
"MF": _("Saint Martin (French part)"),
"PM": _("Saint Pierre and Miquelon"),
"VC": _("Saint Vincent and the Grenadines"),
"WS": _("Samoa"),
"SM": _("San Marino"),
"ST": _("Sao Tome and Principe"),
"SA": _("Saudi Arabia"),
"SN": _("Senegal"),
"RS": _("Serbia"),
"SC": _("Seychelles"),
"SL": _("Sierra Leone"),
"SG": _("Singapore"),
"SX": _("Sint Maarten (Dutch part)"),
"SK": _("Slovakia"),
"SI": _("Slovenia"),
"SB": _("Solomon Islands"),
"SO": _("Somalia"),
"ZA": _("South Africa"),
"GS": _("South Georgia and the South Sandwich Islands"),
"SS": _("South Sudan"),
"ES": _("Spain"),
"LK": _("Sri Lanka"),
"SD": _("Sudan"),
"SR": _("Suriname"),
"SJ": _("Svalbard and Jan Mayen"),
"SZ": _("Swaziland"),
"SE": _("Sweden"),
"CH": _("Switzerland"),
"SY": _("Syrian Arab Republic"),
"TW": _("Taiwan (Province of China)"),
"TJ": _("Tajikistan"),
"TZ": _("Tanzania, United Republic of"),
"TH": _("Thailand"),
"TL": _("Timor-Leste"),
"TG": _("Togo"),
"TK": _("Tokelau"),
"TO": _("Tonga"),
"TT": _("Trinidad and Tobago"),
"TN": _("Tunisia"),
"TR": _("Turkey"),
"TM": _("Turkmenistan"),
"TC": _("Turks and Caicos Islands"),
"TV": _("Tuvalu"),
"UG": _("Uganda"),
"UA": _("Ukraine"),
"AE": _("United Arab Emirates"),
"GB": _("United Kingdom of Great Britain and Northern Ireland"),
"UM": _("United States Minor Outlying Islands"),
"US": _("United States of America"),
"UY": _("Uruguay"),
"UZ": _("Uzbekistan"),
"VU": _("Vanuatu"),
"VE": _("Venezuela (Bolivarian Republic of)"),
"VN": _("Viet Nam"),
"VG": _("Virgin Islands (British)"),
"VI": _("Virgin Islands (U.S.)"),
"WF": _("Wallis and Futuna"),
"EH": _("Western Sahara"),
"YE": _("Yemen"),
"ZM": _("Zambia"),
"ZW": _("Zimbabwe"),
}
ALT_CODES = {
"AF": ("AFG", 4),
"AX": ("ALA", 248),
"AL": ("ALB", 8),
"DZ": ("DZA", 12),
"AS": ("ASM", 16),
"AD": ("AND", 20),
"AO": ("AGO", 24),
"AI": ("AIA", 660),
"AQ": ("ATA", 10),
"AG": ("ATG", 28),
"AR": ("ARG", 32),
"AM": ("ARM", 51),
"AW": ("ABW", 533),
"AU": ("AUS", 36),
"AT": ("AUT", 40),
"AZ": ("AZE", 31),
"BS": ("BHS", 44),
"BH": ("BHR", 48),
"BD": ("BGD", 50),
"BB": ("BRB", 52),
"BY": ("BLR", 112),
"BE": ("BEL", 56),
"BZ": ("BLZ", 84),
"BJ": ("BEN", 204),
"BM": ("BMU", 60),
"BT": ("BTN", 64),
"BO": ("BOL", 68),
"BQ": ("BES", 535),
"BA": ("BIH", 70),
"BW": ("BWA", 72),
"BV": ("BVT", 74),
"BR": ("BRA", 76),
"IO": ("IOT", 86),
"BN": ("BRN", 96),
"BG": ("BGR", 100),
"BF": ("BFA", 854),
"BI": ("BDI", 108),
"CV": ("CPV", 132),
"KH": ("KHM", 116),
"CM": ("CMR", 120),
"CA": ("CAN", 124),
"KY": ("CYM", 136),
"CF": ("CAF", 140),
"TD": ("TCD", 148),
"CL": ("CHL", 152),
"CN": ("CHN", 156),
"CX": ("CXR", 162),
"CC": ("CCK", 166),
"CO": ("COL", 170),
"KM": ("COM", 174),
"CD": ("COD", 180),
"CG": ("COG", 178),
"CK": ("COK", 184),
"CR": ("CRI", 188),
"CI": ("CIV", 384),
"HR": ("HRV", 191),
"CU": ("CUB", 192),
"CW": ("CUW", 531),
"CY": ("CYP", 196),
"CZ": ("CZE", 203),
"DK": ("DNK", 208),
"DJ": ("DJI", 262),
"DM": ("DMA", 212),
"DO": ("DOM", 214),
"EC": ("ECU", 218),
"EG": ("EGY", 818),
"SV": ("SLV", 222),
"GQ": ("GNQ", 226),
"ER": ("ERI", 232),
"EE": ("EST", 233),
"ET": ("ETH", 231),
"FK": ("FLK", 238),
"FO": ("FRO", 234),
"FJ": ("FJI", 242),
"FI": ("FIN", 246),
"FR": ("FRA", 250),
"GF": ("GUF", 254),
"PF": ("PYF", 258),
"TF": ("ATF", 260),
"GA": ("GAB", 266),
"GM": ("GMB", 270),
"GE": ("GEO", 268),
"DE": ("DEU", 276),
"GH": ("GHA", 288),
"GI": ("GIB", 292),
"GR": ("GRC", 300),
"GL": ("GRL", 304),
"GD": ("GRD", 308),
"GP": ("GLP", 312),
"GU": ("GUM", 316),
"GT": ("GTM", 320),
"GG": ("GGY", 831),
"GN": ("GIN", 324),
"GW": ("GNB", 624),
"GY": ("GUY", 328),
"HT": ("HTI", 332),
"HM": ("HMD", 334),
"VA": ("VAT", 336),
"HN": ("HND", 340),
"HK": ("HKG", 344),
"HU": ("HUN", 348),
"IS": ("ISL", 352),
"IN": ("IND", 356),
"ID": ("IDN", 360),
"IR": ("IRN", 364),
"IQ": ("IRQ", 368),
"IE": ("IRL", 372),
"IM": ("IMN", 833),
"IL": ("ISR", 376),
"IT": ("ITA", 380),
"JM": ("JAM", 388),
"JP": ("JPN", 392),
"JE": ("JEY", 832),
"JO": ("JOR", 400),
"KZ": ("KAZ", 398),
"KE": ("KEN", 404),
"KI": ("KIR", 296),
"KP": ("PRK", 408),
"KR": ("KOR", 410),
"KW": ("KWT", 414),
"KG": ("KGZ", 417),
"LA": ("LAO", 418),
"LV": ("LVA", 428),
"LB": ("LBN", 422),
"LS": ("LSO", 426),
"LR": ("LBR", 430),
"LY": ("LBY", 434),
"LI": ("LIE", 438),
"LT": ("LTU", 440),
"LU": ("LUX", 442),
"MO": ("MAC", 446),
"MK": ("MKD", 807),
"MG": ("MDG", 450),
"MW": ("MWI", 454),
"MY": ("MYS", 458),
"MV": ("MDV", 462),
"ML": ("MLI", 466),
"MT": ("MLT", 470),
"MH": ("MHL", 584),
"MQ": ("MTQ", 474),
"MR": ("MRT", 478),
"MU": ("MUS", 480),
"YT": ("MYT", 175),
"MX": ("MEX", 484),
"FM": ("FSM", 583),
"MD": ("MDA", 498),
"MC": ("MCO", 492),
"MN": ("MNG", 496),
"ME": ("MNE", 499),
"MS": ("MSR", 500),
"MA": ("MAR", 504),
"MZ": ("MOZ", 508),
"MM": ("MMR", 104),
"NA": ("NAM", 516),
"NR": ("NRU", 520),
"NP": ("NPL", 524),
"NL": ("NLD", 528),
"NC": ("NCL", 540),
"NZ": ("NZL", 554),
"NI": ("NIC", 558),
"NE": ("NER", 562),
"NG": ("NGA", 566),
"NU": ("NIU", 570),
"NF": ("NFK", 574),
"MP": ("MNP", 580),
"NO": ("NOR", 578),
"OM": ("OMN", 512),
"PK": ("PAK", 586),
"PW": ("PLW", 585),
"PS": ("PSE", 275),
"PA": ("PAN", 591),
"PG": ("PNG", 598),
"PY": ("PRY", 600),
"PE": ("PER", 604),
"PH": ("PHL", 608),
"PN": ("PCN", 612),
"PL": ("POL", 616),
"PT": ("PRT", 620),
"PR": ("PRI", 630),
"QA": ("QAT", 634),
"RE": ("REU", 638),
"RO": ("ROU", 642),
"RU": ("RUS", 643),
"RW": ("RWA", 646),
"BL": ("BLM", 652),
"SH": ("SHN", 654),
"KN": ("KNA", 659),
"LC": ("LCA", 662),
"MF": ("MAF", 663),
"PM": ("SPM", 666),
"VC": ("VCT", 670),
"WS": ("WSM", 882),
"SM": ("SMR", 674),
"ST": ("STP", 678),
"SA": ("SAU", 682),
"SN": ("SEN", 686),
"RS": ("SRB", 688),
"SC": ("SYC", 690),
"SL": ("SLE", 694),
"SG": ("SGP", 702),
"SX": ("SXM", 534),
"SK": ("SVK", 703),
"SI": ("SVN", 705),
"SB": ("SLB", 90),
"SO": ("SOM", 706),
"ZA": ("ZAF", 710),
"GS": ("SGS", 239),
"SS": ("SSD", 728),
"ES": ("ESP", 724),
"LK": ("LKA", 144),
"SD": ("SDN", 729),
"SR": ("SUR", 740),
"SJ": ("SJM", 744),
"SZ": ("SWZ", 748),
"SE": ("SWE", 752),
"CH": ("CHE", 756),
"SY": ("SYR", 760),
"TW": ("TWN", 158),
"TJ": ("TJK", 762),
"TZ": ("TZA", 834),
"TH": ("THA", 764),
"TL": ("TLS", 626),
"TG": ("TGO", 768),
"TK": ("TKL", 772),
"TO": ("TON", 776),
"TT": ("TTO", 780),
"TN": ("TUN", 788),
"TR": ("TUR", 792),
"TM": ("TKM", 795),
"TC": ("TCA", 796),
"TV": ("TUV", 798),
"UG": ("UGA", 800),
"UA": ("UKR", 804),
"AE": ("ARE", 784),
"GB": ("GBR", 826),
"UM": ("UMI", 581),
"US": ("USA", 840),
"UY": ("URY", 858),
"UZ": ("UZB", 860),
"VU": ("VUT", 548),
"VE": ("VEN", 862),
"VN": ("VNM", 704),
"VG": ("VGB", 92),
"VI": ("VIR", 850),
"WF": ("WLF", 876),
"EH": ("ESH", 732),
"YE": ("YEM", 887),
"ZM": ("ZMB", 894),
"ZW": ("ZWE", 716),
}
def self_generate(
output_filename, filename='iso3166-1.csv'): # pragma: no cover
"""
The following code can be used for self-generation of this file.
It requires a UTF-8 CSV file containing the short ISO name and two letter
country code as the first two columns.
"""
import csv
import re
countries = []
alt_codes = []
with open(filename, 'rb') as csv_file:
for row in csv.reader(csv_file):
name = row[0].decode('utf-8').rstrip('*')
name = re.sub(r'\(the\)', '', name)
if name:
countries.append((name, row[1].decode('utf-8')))
alt_codes.append((
row[1].decode('utf-8'),
row[2].decode('utf-8'),
int(row[3]),
))
with open(__file__, 'r') as source_file:
contents = source_file.read()
# Write countries.
bits = re.match(
'(.*\nCOUNTRIES = \{\n)(.*?)(\n\}.*)', contents, re.DOTALL).groups()
country_list = []
for name, code in countries:
name = name.replace('"', r'\"').strip()
country_list.append(
' "{code}": _("{name}"),'.format(name=name, code=code))
content = bits[0]
content += '\n'.join(country_list).encode('utf-8')
# Write alt codes.
alt_bits = re.match(
'(.*\nALT_CODES = \{\n)(.*)(\n\}.*)', bits[2], re.DOTALL).groups()
alt_list = []
for code, code3, codenum in alt_codes:
        name = name.replace('"', r'\"').strip()  # no-op leftover from the countries loop; `name` is unused below
alt_list.append(
' "{code}": ("{code3}", {codenum}),'.format(
code=code, code3=code3, codenum=codenum))
content += alt_bits[0]
content += '\n'.join(alt_list).encode('utf-8')
content += alt_bits[2]
# Generate file.
with open(output_filename, 'wb') as output_file:
output_file.write(content)
return countries
def check_flags(verbosity=1):
files = {}
this_dir = os.path.dirname(__file__)
for path in glob.glob(os.path.join(this_dir, 'static', 'flags', '*.gif')):
files[os.path.basename(os.path.splitext(path)[0]).upper()] = path
flags_missing = set(COUNTRIES) - set(files)
if flags_missing: # pragma: no cover
print("The following country codes are missing a flag:")
for code in sorted(flags_missing):
print(" {0} ({1})".format(code, COUNTRIES[code]))
elif verbosity: # pragma: no cover
print("All country codes have flags. :)")
code_missing = set(files) - set(COUNTRIES)
# Special-case EU and __
for special_code in ('EU', '__'):
code_missing.discard(special_code)
if code_missing: # pragma: no cover
print("")
print("The following flags don't have a matching country code:")
for path in sorted(code_missing):
print(" {0}".format(path))
def check_common_names():
common_names_missing = set(COMMON_NAMES) - set(COUNTRIES)
if common_names_missing: # pragma: no cover
print("")
print(
"The following common names do not match an official country "
"code:")
for code in sorted(common_names_missing):
print(" {0}".format(code))
if __name__ == '__main__': # pragma: no cover
countries = self_generate(__file__)
print('Wrote {0} countries.'.format(len(countries)))
print("")
check_flags()
check_common_names()
| 27.489198 | 79 | 0.46045 | [
"MIT"
] | Bounder/django-countries | django_countries/data.py | 17,818 | Python |
from datetime import datetime
from jinja2 import Markup
from flask import current_app
class _moment(object):
@staticmethod
def include_moment(version='2.5.1', local_js=None):
js = ''
if local_js is not None:
js = '<script src="%s"></script>\n' % local_js
elif version is not None:
js = '<script src="//cdnjs.cloudflare.com/ajax/libs/' \
'moment.js/%s/moment-with-langs.min.js"></script>\n' % version
return Markup('''%s<script>
function flask_moment_render(elem) {
$(elem).text(eval('moment("' + $(elem).data('timestamp') + '").' + $(elem).data('format') + ';'));
$(elem).removeClass('flask-moment');
}
function flask_moment_render_all() {
$('.flask-moment').each(function() {
flask_moment_render(this);
if ($(this).data('refresh')) {
(function(elem, interval) { setInterval(function() { flask_moment_render(elem) }, interval); })(this, $(this).data('refresh'));
}
})
}
$(document).ready(function() {
flask_moment_render_all();
});
</script>''' % js)
@staticmethod
def include_jquery(version='2.1.0', local_js=None):
js = ''
if local_js is not None:
js = '<script src="%s"></script>\n' % local_js
else:
js = ('<script src="//code.jquery.com/' +
'jquery-%s.min.js"></script>') % version
return Markup(js)
@staticmethod
def lang(language):
return Markup('<script>\nmoment.lang("%s");\n</script>' % language)
def __init__(self, timestamp=None, local=False):
if timestamp is None:
timestamp = datetime.utcnow()
self.timestamp = timestamp
self.local = local
def _timestamp_as_iso_8601(self, timestamp):
tz = ''
if not self.local:
tz = 'Z'
return timestamp.strftime('%Y-%m-%dT%H:%M:%S' + tz)
def _render(self, format, refresh=False):
t = self._timestamp_as_iso_8601(self.timestamp)
return Markup(('<span class="flask-moment" data-timestamp="%s" ' +
'data-format="%s" data-refresh="%d">%s</span>') %
(t, format, int(refresh) * 60000, t))
def format(self, fmt, refresh=False):
return self._render("format('%s')" % fmt, refresh)
def fromNow(self, no_suffix=False, refresh=False):
return self._render("fromNow(%s)" % int(no_suffix), refresh)
def fromTime(self, timestamp, no_suffix=False, refresh=False):
return self._render("from(moment('%s'),%s)" %
(self._timestamp_as_iso_8601(timestamp),
int(no_suffix)), refresh)
def calendar(self, refresh=False):
return self._render("calendar()", refresh)
def valueOf(self, refresh=False):
return self._render("valueOf()", refresh)
def unix(self, refresh=False):
return self._render("unix()", refresh)
class Moment(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['moment'] = _moment
app.context_processor(self.context_processor)
@staticmethod
def context_processor():
return {
'moment': current_app.extensions['moment']
}
def create(self, timestamp=None):
return current_app.extensions['moment'](timestamp)
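# Typical wiring (a sketch based on the classes above):
#   moment = Moment(app)                  # or: moment.init_app(app)
# and then, inside a Jinja2 template rendered by that app:
#   {{ moment.include_jquery() }}
#   {{ moment.include_moment() }}
#   {{ moment(timestamp).fromNow(refresh=True) }}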
| 33.342857 | 139 | 0.581834 | [
"MIT"
] | ForgottenKahz/CloudOPC | venv/Lib/site-packages/flask_moment.py | 3,501 | Python |
import pandas as pd
# Load the Pima Indians diabetes data and name its nine columns
# (eight features plus the 'diabetes' outcome).
pima = pd.read_csv('pima-indians-diabetes.csv', encoding="shift-jis")
pima.columns = ['pregnant', 'plasmaGlucose', 'bloodP', 'skinThick',
                'serumInsulin', 'weight', 'pedigree', 'age', 'diabetes']
from sklearn.model_selection import train_test_split
# Separate the target column and hold out 20% of the rows for testing.
y = pima['diabetes']
X = pima.drop(['diabetes'], axis=1)
nh = 4  # number of hidden units for the random layer
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=54, shuffle=True)
from sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier
from sklearn_extensions.extreme_learning_machines.random_layer import RBFRandomLayer, MLPRandomLayer
# Two candidate random hidden layers; only the tanh MLP layer is used below.
srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
srhl_rbf = RBFRandomLayer(n_hidden=nh * 2, rbf_width=0.1, random_state=0)
# Train the extreme learning machine and report test-set accuracy.
clf1 = GenELMClassifier(hidden_layer=srhl_tanh)
clf1.fit(X_train, y_train)
print(clf1.score(X_test, y_test))
# Leftover from a tree-based experiment: GenELMClassifier does not expose
# feature_importances_ and `clf` is never defined above, so this block stays
# commented out.
'''
dic=dict(zip(X.columns,clf.feature_importances_))
for item in sorted(dic.items(), key=lambda x: x[1], reverse=True):
    print(item[0],round(item[1],4))
'''
| 47.238095 | 114 | 0.794355 | [
"MIT"
] | ytakefuji/machine-learning | elm_pima.py | 992 | Python |
import unittest
from tori.decorator.common import *
class TestDecoratorCommonSingletonClass(unittest.TestCase):
""" Test the 'singleton' decorator. """
class DummyTest(object):
def __init__(self):
self.number = 0
def take_action(self):
self.number += 1
def get_number(self):
return self.number
def test_positive_without_instance_attr(self):
""" Test if the target class without a singleton attribute. """
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest): pass
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
# Test for the type.
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
# Test if it is working. (case #1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 1)
# Test if it is working. (case #n)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 2)
def test_positive_using_decorator_with_primitive_parameters(self):
""" Test if the target class without a singleton attribute but using a decorator with primitive parameters. """
try:
@singleton(10)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, init_number):
super(self.__class__, self).__init__()
self.number = init_number
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
# Test for the type.
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
# Test if it is working. (case #1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 11)
# Test if it is working. (case #n)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 12)
def test_positive_for_normal_singleton_with_parameters(self):
""" Positive test for @singleton with parameters provided for the constructor """
try:
class SampleDependencyInjection(object): pass
sample_di = SampleDependencyInjection()
@singleton(sample_di)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
super(self.__class__, self).__init__()
self.dependency_injection = dependency_injection
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
# Test for the type.
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
# Test if it is working. (case #1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 1)
# Test if it is working. (case #n)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 2)
# Test if the dependency injection is working.
self.assertIsInstance(SuperDummyClass.instance().dependency_injection, SampleDependencyInjection)
def test_negative_for_normal_singleton_with_class_reference(self):
""" Negative test for @singleton with class_reference provided for the constructor """
# Note that this test case shows the limitation of the decorator which
# can't take a class reference as a parameter. Strongly recommend to
# use @singleton_with as it is more powerful.
try:
class SampleDependencyInjection(object): pass
@singleton(SampleDependencyInjection)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
super(self.__class__, self).__init__()
self.dependency_injection = dependency_injection
            self.assertTrue(False, 'Singleton Class: Passed the initialization unexpectedly.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known-yet-unexpected exception.')
except TypeError:
self.assertTrue(True, 'Singleton Class: Failed the initialization with expected exception.')
def test_positive_for_singleton_with(self):
""" Positive test for @singleton_with(*args, **kwargs) """
        # Unlike the plain @singleton decorator, @singleton_with accepts a
        # class reference and hands it to the decorated class' constructor,
        # so the dependency can be instantiated inside __init__.
try:
class SampleDependencyInjection(object): pass
@singleton_with(SampleDependencyInjection)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
super(self.__class__, self).__init__()
self.dependency_injection = dependency_injection()
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
# Test for the type.
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
# Test if it is working. (case #1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 1)
# Test if it is working. (case #n)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 2)
# Test if the dependency injection is working.
self.assertIsInstance(SuperDummyClass.instance().dependency_injection, SampleDependencyInjection)
def test_negative_with_existed_singleton_instance(self):
""" Test if the target class is with null singleton attribute. """
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = None
def __init__(self):
# Use `self.__class__` to call the parent class' constructor.
super(self.__class__, self).__init__()
self.assertTrue(False, 'Singleton Class: Passed the initialization unexpectedly.')
except SingletonInitializationException:
self.assertTrue(True, 'Singleton Class: Failed the initialization with expected exception.')
def test_negative_with_unexpected_instance_attr(self):
""" Test if the target class has already had an attribute `_singleton_instance` but it is not compatible. """
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = {}
def __init__(self):
# Use `self.__class__` to call the parent class' constructor.
super(self.__class__, self).__init__()
self.assertTrue(False, 'Singleton Class: Passed the initialization unexpectedly.')
except SingletonInitializationException:
self.assertTrue(True, 'Singleton Class: Failed the initialization with expected exception.')
if __name__ == '__main__':
unittest.main() | 52.703226 | 119 | 0.667891 | [
"MIT"
] | shiroyuki/Tori | test/ut/test_decorator_common_singleton.py | 8,169 | Python |
from types import BeaverException, Variable, Collection, EmptyCollection, updated_context
from copy import deepcopy as copy
class Statement(object):
'''A generic statement containing any number of variable parts.'''
def __init__(self, subject, verb_objects):
self.subject = subject
self.verb_objects = [(v, list(o)) for v, o in verb_objects]
def __str__(self):
return '%s %s .' % (str(self.subject),
' ; '.join(['%s %s' % (str(verb),
', '.join([str(o) for o in objects]))
for verb, objects in self.verb_objects])
)
def __repr__(self): return str(self)
def __eq__(self, x): return str(self) == str(x)
def replace(self, *varsets):
'''Checks each part of the statement against defined variables. If any
matches are found, the statement is updated. If the statement is a function
call, a new set of statements is returned; otherwise, None is returned.'''
matched = False
# check for single variables in statement
subj = self.subject
if isinstance(subj, Variable):
result, new_match = def_match(subj, varsets)
if result:
self.subject = new_match
return self.replace(*varsets)
for n, (verb, objects) in enumerate(self.verb_objects):
if isinstance(verb, Variable):
result, new_match = def_match(verb, varsets)
if result:
v, o = self.verb_objects[n]
self.verb_objects[n] = (new_match, o)
return self.replace(*varsets)
for m, obj in enumerate(objects):
if isinstance(obj, Variable):
result, new_match = def_match(obj, varsets)
if result:
objects[m] = new_match
return self.replace(*varsets)
return None
def match(given, definition):
'''Returns true if a given argument matches the definition.'''
if isinstance(definition, Variable): return True
return definition == given
def def_match(part, varsets):
    '''Looks `part` up in each varset in order. Returns (True, definition) for
    the first definition that differs from the variable itself (copying it when
    the stored pattern is None, and converting tuples to lists), or
    (False, None) when no varset defines the variable.'''
    matched = False
    for varset in varsets:
        if matched: break
        if part in varset:
            defs = varset[part]
            for (pattern, definition) in defs:
                if matched: break
                if part == definition: continue
                if pattern is None:
                    definition = copy(definition)
                # a match was found; replace the variable with its definition
                if isinstance(definition, tuple): definition = list(definition)
                return (True, definition)
    return (False, None)
| 39.973333 | 90 | 0.527352 | [
"MIT"
] | bendmorris/beaver | lib/statement.py | 2,998 | Python |
import copy
import numpy as np
import torch
from mmpose.core import (aggregate_results, get_group_preds,
get_multi_stage_outputs)
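# The three tests below exercise the bottom-up evaluation helpers: fusing
# multi-stage network outputs (optionally with flipped inputs), aggregating
# heatmaps/tags across test scales, and mapping grouped joints back to the
# original image space.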
def test_get_multi_stage_outputs():
fake_outputs = [torch.zeros((1, 4, 2, 2))]
fake_flip_outputs = [torch.ones((1, 4, 2, 2))]
# outputs_flip
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=4, with_heatmaps=[False],
with_ae=[True])
assert heatmaps == []
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=2, with_heatmaps=[True],
with_ae=[True])
assert len(heatmaps) == 1
flip_index = [1, 0]
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True],
with_ae=[True], flip_index=flip_index)
assert len(heatmaps) == 2
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
tag_per_joint=False,
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True],
with_ae=[True], flip_index=flip_index)
assert len(heatmaps) == 2
# with heatmaps & with ae
fake_outputs = [torch.zeros((1, 4, 2, 2)), torch.ones((1, 2, 4, 4))]
fake_flip_outputs = [torch.ones((1, 4, 2, 2)), torch.ones((1, 2, 4, 4))]
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=2, with_heatmaps=[True, False],
with_ae=[True, True])
assert torch.allclose(heatmaps[0], torch.tensor(0.))
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True, True],
with_ae=[True, False])
assert torch.allclose(heatmaps[0], torch.tensor(0.5))
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True, False],
with_ae=[True, False], flip_index=flip_index)
assert torch.allclose(heatmaps[0], torch.tensor(0.))
# size_projected
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=2, with_heatmaps=[True, True],
with_ae=[True, False],
size_projected=(8, 8))
assert heatmaps[0].shape == torch.Size([1, 2, 8, 8])
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True, True],
with_ae=[True, False],
align_corners=True)
assert torch.allclose(heatmaps[0], torch.tensor(0.5))
def test_aggregate_results():
fake_heatmaps = [torch.zeros((1, 2, 2, 2))]
fake_tags = [torch.zeros((1, 2, 2, 2))]
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=None, tags_list=[],
heatmaps=fake_heatmaps, tags=fake_tags,
test_scale_factor=[1], project2image=True,
flip_test=False)
assert torch.allclose(aggregated_heatmaps, torch.tensor(0.))
fake_aggr_heatmaps = torch.ones(1, 2, 2, 2)
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1],
project2image=True, flip_test=False)
assert torch.allclose(aggregated_heatmaps, torch.tensor(1.))
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1],
project2image=True, flip_test=False,
align_corners=True)
assert torch.allclose(aggregated_heatmaps, torch.tensor(1.))
fake_heatmaps = [torch.zeros((1, 2, 2, 2)), torch.ones((1, 2, 2, 2))]
fake_aggr_heatmaps = torch.ones(1, 2, 4, 4)
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1],
project2image=False, flip_test=True)
assert aggregated_heatmaps.shape == torch.Size((1, 2, 4, 4))
aggregated_heatmaps, tags_list = \
aggregate_results(scale=2, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1, 2],
project2image=False, flip_test=True)
assert aggregated_heatmaps.shape == torch.Size((1, 2, 4, 4))
def test_get_group_preds():
fake_grouped_joints = [np.array([[[0, 0], [1, 1]]])]
results = get_group_preds(
fake_grouped_joints,
center=np.array([0, 0]),
scale=np.array([1, 1]),
heatmap_size=np.array([2, 2]))
assert not results == []
results = get_group_preds(
fake_grouped_joints,
center=np.array([0, 0]),
scale=np.array([1, 1]),
heatmap_size=np.array([2, 2]),
use_udp=True)
assert not results == []
| 49.06015 | 78 | 0.546973 | [
"Apache-2.0"
] | jcwon0/BlurHPE | tests/test_evaluation/test_bottom_up_eval.py | 6,525 | Python |
import typing
from typing import List
from quarkchain.core import (
CrossShardTransactionList,
MinorBlock,
MinorBlockHeader,
RootBlock,
TransactionReceipt,
Log,
FixedSizeBytesSerializer,
biguint,
Constant,
)
from quarkchain.core import (
TypedTransaction,
Optional,
PrependedSizeBytesSerializer,
PrependedSizeListSerializer,
Serializable,
Address,
Branch,
TokenBalanceMap,
PrependedSizeMapSerializer,
)
from quarkchain.core import (
hash256,
uint16,
uint32,
uint64,
uint256,
boolean,
signature65,
)
# RPCs to initialize a cluster
class Ping(Serializable):
FIELDS = [
("id", PrependedSizeBytesSerializer(4)),
("full_shard_id_list", PrependedSizeListSerializer(4, uint32)),
("root_tip", Optional(RootBlock)), # Initialize ShardState if not None
]
def __init__(self, id, full_shard_id_list, root_tip):
""" Empty full_shard_id_list means root """
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, "ascii")
self.full_shard_id_list = full_shard_id_list
self.root_tip = root_tip
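# A construction sketch with hypothetical values; `id` may be str or bytes, and
# an empty full_shard_id_list means root (see the docstring above):
#   Ping(id="master", full_shard_id_list=[], root_tip=None)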
class Pong(Serializable):
FIELDS = [
("id", PrependedSizeBytesSerializer(4)),
("full_shard_id_list", PrependedSizeListSerializer(4, uint32)),
]
def __init__(self, id, full_shard_id_list):
""" Empty slave_id and full_shard_id_list means root """
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, "ascii")
self.full_shard_id_list = full_shard_id_list
class SlaveInfo(Serializable):
FIELDS = [
("id", PrependedSizeBytesSerializer(4)),
("host", PrependedSizeBytesSerializer(4)),
("port", uint16),
("full_shard_id_list", PrependedSizeListSerializer(4, uint32)),
]
def __init__(self, id, host, port, full_shard_id_list):
self.id = id if isinstance(id, bytes) else bytes(id, "ascii")
self.host = host if isinstance(host, bytes) else bytes(host, "ascii")
self.port = port
self.full_shard_id_list = full_shard_id_list
class ConnectToSlavesRequest(Serializable):
""" Master instructs a slave to connect to other slaves """
FIELDS = [("slave_info_list", PrependedSizeListSerializer(4, SlaveInfo))]
def __init__(self, slave_info_list):
self.slave_info_list = slave_info_list
class ConnectToSlavesResponse(Serializable):
""" result_list must have the same size as salve_info_list in the request.
Empty result means success otherwise it would a serialized error message.
"""
FIELDS = [
("result_list", PrependedSizeListSerializer(4, PrependedSizeBytesSerializer(4)))
]
def __init__(self, result_list):
self.result_list = result_list
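# A sketch of an all-success response (one empty result per slave in the
# request; `request` is a hypothetical ConnectToSlavesRequest instance):
#   ConnectToSlavesResponse([b""] * len(request.slave_info_list))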
class ArtificialTxConfig(Serializable):
FIELDS = [("target_root_block_time", uint32), ("target_minor_block_time", uint32)]
def __init__(self, target_root_block_time, target_minor_block_time):
self.target_root_block_time = target_root_block_time
self.target_minor_block_time = target_minor_block_time
class MineRequest(Serializable):
"""Send mining instructions to slaves"""
FIELDS = [
("artificial_tx_config", ArtificialTxConfig),
("mining", boolean), # False to halt mining
]
def __init__(self, artificial_tx_config, mining):
self.artificial_tx_config = artificial_tx_config
self.mining = mining
class MineResponse(Serializable):
FIELDS = [("error_code", uint32)]
def __init__(self, error_code):
self.error_code = error_code
class GenTxRequest(Serializable):
"""Generate transactions for loadtesting"""
FIELDS = [
("num_tx_per_shard", uint32),
("x_shard_percent", uint32), # [0, 100]
("tx", TypedTransaction), # sample tx
]
def __init__(self, num_tx_per_shard, x_shard_percent, tx):
self.num_tx_per_shard = num_tx_per_shard
self.x_shard_percent = x_shard_percent
self.tx = tx
class GenTxResponse(Serializable):
FIELDS = [("error_code", uint32)]
def __init__(self, error_code):
self.error_code = error_code
# Virtual connection management
class CreateClusterPeerConnectionRequest(Serializable):
""" Broadcast to the cluster and announce that a peer connection is created
Assume always succeed.
"""
FIELDS = [("cluster_peer_id", uint64)]
def __init__(self, cluster_peer_id):
self.cluster_peer_id = cluster_peer_id
class CreateClusterPeerConnectionResponse(Serializable):
FIELDS = [("error_code", uint32)]
def __init__(self, error_code):
self.error_code = error_code
class DestroyClusterPeerConnectionCommand(Serializable):
""" Broadcast to the cluster and announce that a peer connection is lost
As a contract, the master will not send traffic after the command.
"""
FIELDS = [("cluster_peer_id", uint64)]
def __init__(self, cluster_peer_id):
self.cluster_peer_id = cluster_peer_id
# RPCs to lookup data from shards (master -> slaves)
class GetMinorBlockRequest(Serializable):
FIELDS = [
("branch", Branch),
("minor_block_hash", hash256),
("height", uint64),
("need_extra_info", boolean),
]
def __init__(self, branch, minor_block_hash=None, height=0, need_extra_info=False):
self.branch = branch
self.minor_block_hash = minor_block_hash if minor_block_hash else bytes(32)
self.height = height
self.need_extra_info = need_extra_info
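# A block is looked up either by hash or by height; the unused selector is left
# at its default (all-zero hash / height 0). `need_extra_info` additionally
# requests the PoSW-related fields carried by MinorBlockExtraInfo below.
# (Interpretation based on the defaults above.)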
class MinorBlockExtraInfo(Serializable):
FIELDS = [
("effective_difficulty", biguint),
("posw_mineable_blocks", uint16),
("posw_mined_blocks", uint16),
]
def __init__(
self,
effective_difficulty: int,
posw_mineable_blocks: int,
posw_mined_blocks: int,
):
self.effective_difficulty = effective_difficulty
self.posw_mineable_blocks = posw_mineable_blocks
self.posw_mined_blocks = posw_mined_blocks
class GetMinorBlockResponse(Serializable):
FIELDS = [
("error_code", uint32),
("minor_block", MinorBlock),
("extra_info", Optional(MinorBlockExtraInfo)),
]
def __init__(self, error_code, minor_block, extra_info=None):
self.error_code = error_code
self.minor_block = minor_block
self.extra_info = extra_info
class GetTransactionRequest(Serializable):
FIELDS = [("tx_hash", hash256), ("branch", Branch)]
def __init__(self, tx_hash, branch):
self.tx_hash = tx_hash
self.branch = branch
class GetTransactionResponse(Serializable):
FIELDS = [("error_code", uint32), ("minor_block", MinorBlock), ("index", uint32)]
def __init__(self, error_code, minor_block, index):
self.error_code = error_code
self.minor_block = minor_block
self.index = index
class ExecuteTransactionRequest(Serializable):
FIELDS = [
("tx", TypedTransaction),
("from_address", Address),
("block_height", Optional(uint64)),
]
def __init__(self, tx, from_address, block_height: typing.Optional[int]):
self.tx = tx
self.from_address = from_address
self.block_height = block_height
class ExecuteTransactionResponse(Serializable):
FIELDS = [("error_code", uint32), ("result", PrependedSizeBytesSerializer(4))]
def __init__(self, error_code, result):
self.error_code = error_code
self.result = result
class GetTransactionReceiptRequest(Serializable):
FIELDS = [("tx_hash", hash256), ("branch", Branch)]
def __init__(self, tx_hash, branch):
self.tx_hash = tx_hash
self.branch = branch
class GetTransactionReceiptResponse(Serializable):
FIELDS = [
("error_code", uint32),
("minor_block", MinorBlock),
("index", uint32),
("receipt", TransactionReceipt),
]
def __init__(self, error_code, minor_block, index, receipt):
self.error_code = error_code
self.minor_block = minor_block
self.index = index
self.receipt = receipt
class GetTransactionListByAddressRequest(Serializable):
FIELDS = [
("address", Address),
("transfer_token_id", Optional(uint64)),
("start", PrependedSizeBytesSerializer(4)),
("limit", uint32),
]
def __init__(self, address, transfer_token_id, start, limit):
self.address = address
self.transfer_token_id = transfer_token_id
self.start = start
self.limit = limit
class TransactionDetail(Serializable):
FIELDS = [
("tx_hash", hash256),
("nonce", uint64),
("from_address", Address),
("to_address", Optional(Address)),
("value", uint256),
("block_height", uint64),
("timestamp", uint64), # block timestamp
("success", boolean),
("gas_token_id", uint64),
("transfer_token_id", uint64),
("is_from_root_chain", boolean),
]
def __init__(
self,
tx_hash,
nonce,
from_address,
to_address,
value,
block_height,
timestamp,
success,
gas_token_id,
transfer_token_id,
is_from_root_chain,
):
self.tx_hash = tx_hash
self.nonce = nonce
self.from_address = from_address
self.to_address = to_address
self.value = value
self.block_height = block_height
self.timestamp = timestamp
self.success = success
self.gas_token_id = gas_token_id
self.transfer_token_id = transfer_token_id
self.is_from_root_chain = is_from_root_chain
class GetTransactionListByAddressResponse(Serializable):
FIELDS = [
("error_code", uint32),
("tx_list", PrependedSizeListSerializer(4, TransactionDetail)),
("next", PrependedSizeBytesSerializer(4)),
]
def __init__(self, error_code, tx_list, next):
self.error_code = error_code
self.tx_list = tx_list
self.next = next
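# `start` (request) and `next` (response) appear to act as an opaque pagination
# cursor: passing a response's `next` bytes as the following request's `start`
# continues the listing. The same pattern is reused by GetAllTransactions below.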
class GetAllTransactionsRequest(Serializable):
FIELDS = [
("branch", Branch),
("start", PrependedSizeBytesSerializer(4)),
("limit", uint32),
]
def __init__(self, branch, start, limit):
self.branch = branch
self.start = start
self.limit = limit
class GetAllTransactionsResponse(Serializable):
FIELDS = [
("error_code", uint32),
("tx_list", PrependedSizeListSerializer(4, TransactionDetail)),
("next", PrependedSizeBytesSerializer(4)),
]
def __init__(self, error_code, tx_list, next):
self.error_code = error_code
self.tx_list = tx_list
self.next = next
# RPCs to update blockchains
# master -> slave
class AddRootBlockRequest(Serializable):
""" Add root block to each slave
"""
FIELDS = [("root_block", RootBlock), ("expect_switch", boolean)]
def __init__(self, root_block, expect_switch):
self.root_block = root_block
self.expect_switch = expect_switch
class AddRootBlockResponse(Serializable):
FIELDS = [("error_code", uint32), ("switched", boolean)]
def __init__(self, error_code, switched):
self.error_code = error_code
self.switched = switched
class EcoInfo(Serializable):
""" Necessary information for master to decide the best block to mine """
FIELDS = [
("branch", Branch),
("height", uint64),
("coinbase_amount", uint256),
("difficulty", biguint),
("unconfirmed_headers_coinbase_amount", uint256),
]
def __init__(
self,
branch,
height,
coinbase_amount,
difficulty,
unconfirmed_headers_coinbase_amount,
):
self.branch = branch
self.height = height
self.coinbase_amount = coinbase_amount
self.difficulty = difficulty
self.unconfirmed_headers_coinbase_amount = unconfirmed_headers_coinbase_amount
class GetEcoInfoListRequest(Serializable):
FIELDS = []
def __init__(self):
pass
class GetEcoInfoListResponse(Serializable):
FIELDS = [
("error_code", uint32),
("eco_info_list", PrependedSizeListSerializer(4, EcoInfo)),
]
def __init__(self, error_code, eco_info_list):
self.error_code = error_code
self.eco_info_list = eco_info_list
class GetNextBlockToMineRequest(Serializable):
FIELDS = [
("branch", Branch),
("address", Address),
("artificial_tx_config", ArtificialTxConfig),
]
def __init__(self, branch, address, artificial_tx_config):
self.branch = branch
self.address = address
self.artificial_tx_config = artificial_tx_config
class GetNextBlockToMineResponse(Serializable):
FIELDS = [("error_code", uint32), ("block", MinorBlock)]
def __init__(self, error_code, block):
self.error_code = error_code
self.block = block
class AddMinorBlockRequest(Serializable):
"""For adding blocks mined through JRPC"""
FIELDS = [("minor_block_data", PrependedSizeBytesSerializer(4))]
def __init__(self, minor_block_data):
self.minor_block_data = minor_block_data
class AddMinorBlockResponse(Serializable):
FIELDS = [("error_code", uint32)]
def __init__(self, error_code):
self.error_code = error_code
class CheckMinorBlockRequest(Serializable):
"""For adding blocks mined through JRPC"""
FIELDS = [("minor_block_header", MinorBlockHeader)]
def __init__(self, minor_block_header):
self.minor_block_header = minor_block_header
class CheckMinorBlockResponse(Serializable):
FIELDS = [("error_code", uint32)]
def __init__(self, error_code):
self.error_code = error_code
class HeadersInfo(Serializable):
FIELDS = [
("branch", Branch),
("header_list", PrependedSizeListSerializer(4, MinorBlockHeader)),
]
def __init__(self, branch, header_list):
self.branch = branch
self.header_list = header_list
class GetUnconfirmedHeadersRequest(Serializable):
"""To collect minor block headers to build a new root block"""
FIELDS = []
def __init__(self):
pass
class GetUnconfirmedHeadersResponse(Serializable):
FIELDS = [
("error_code", uint32),
("headers_info_list", PrependedSizeListSerializer(4, HeadersInfo)),
]
def __init__(self, error_code, headers_info_list):
self.error_code = error_code
self.headers_info_list = headers_info_list
class GetAccountDataRequest(Serializable):
FIELDS = [("address", Address), ("block_height", Optional(uint64))]
def __init__(self, address: Address, block_height: typing.Optional[int] = None):
self.address = address
self.block_height = block_height
class AccountBranchData(Serializable):
FIELDS = [
("branch", Branch),
("transaction_count", uint256),
("token_balances", TokenBalanceMap),
("is_contract", boolean),
("posw_mineable_blocks", uint16),
("mined_blocks", uint16),
]
def __init__(
self,
branch,
transaction_count,
token_balances,
is_contract,
mined_blocks=0,
posw_mineable_blocks=0,
):
self.branch = branch
self.transaction_count = transaction_count
self.token_balances = token_balances
self.is_contract = is_contract
self.mined_blocks = mined_blocks
self.posw_mineable_blocks = posw_mineable_blocks
class GetAccountDataResponse(Serializable):
FIELDS = [
("error_code", uint32),
("account_branch_data_list", PrependedSizeListSerializer(4, AccountBranchData)),
]
def __init__(self, error_code, account_branch_data_list):
self.error_code = error_code
self.account_branch_data_list = account_branch_data_list
class AddTransactionRequest(Serializable):
FIELDS = [("tx", TypedTransaction)]
def __init__(self, tx):
self.tx = tx
class AddTransactionResponse(Serializable):
FIELDS = [("error_code", uint32)]
def __init__(self, error_code):
self.error_code = error_code
class ShardStats(Serializable):
FIELDS = [
("branch", Branch),
("height", uint64),
("difficulty", biguint),
("coinbase_address", Address),
("timestamp", uint64),
("tx_count60s", uint32),
("pending_tx_count", uint32),
("total_tx_count", uint32),
("block_count60s", uint32),
("stale_block_count60s", uint32),
("last_block_time", uint32),
]
def __init__(
self,
branch: Branch,
height: int,
difficulty: int,
coinbase_address: Address,
timestamp: int,
tx_count60s: int,
pending_tx_count: int,
total_tx_count: int,
block_count60s: int,
stale_block_count60s: int,
last_block_time: int,
):
self.branch = branch
self.height = height
self.difficulty = difficulty
self.coinbase_address = coinbase_address
self.timestamp = timestamp
self.tx_count60s = tx_count60s
self.pending_tx_count = pending_tx_count
self.total_tx_count = total_tx_count
self.block_count60s = block_count60s
self.stale_block_count60s = stale_block_count60s
self.last_block_time = last_block_time
class RootBlockSychronizerStats(Serializable):
FIELDS = [
("headers_downloaded", uint64),
("blocks_downloaded", uint64),
("blocks_added", uint64),
("ancestor_not_found_count", uint64),
("ancestor_lookup_requests", uint64),
]
def __init__(
self,
headers_downloaded=0,
blocks_downloaded=0,
blocks_added=0,
ancestor_not_found_count=0,
ancestor_lookup_requests=0,
):
self.headers_downloaded = headers_downloaded
self.blocks_downloaded = blocks_downloaded
self.blocks_added = blocks_added
self.ancestor_not_found_count = ancestor_not_found_count
self.ancestor_lookup_requests = ancestor_lookup_requests
class SyncMinorBlockListRequest(Serializable):
FIELDS = [
("minor_block_hash_list", PrependedSizeListSerializer(4, hash256)),
("branch", Branch),
("cluster_peer_id", uint64),
]
def __init__(self, minor_block_hash_list, branch, cluster_peer_id):
self.minor_block_hash_list = minor_block_hash_list
self.branch = branch
self.cluster_peer_id = cluster_peer_id
class SyncMinorBlockListResponse(Serializable):
FIELDS = [
("error_code", uint32),
("block_coinbase_map", PrependedSizeMapSerializer(4, hash256, TokenBalanceMap)),
("shard_stats", Optional(ShardStats)),
]
def __init__(self, error_code, block_coinbase_map=None, shard_stats=None):
self.error_code = error_code
self.block_coinbase_map = block_coinbase_map or {}
self.shard_stats = shard_stats
# slave -> master
class AddMinorBlockHeaderRequest(Serializable):
""" Notify master about a successfully added minor block.
Piggyback the ShardStats in the same request.
"""
FIELDS = [
("minor_block_header", MinorBlockHeader),
("tx_count", uint32), # the total number of tx in the block
("x_shard_tx_count", uint32), # the number of xshard tx in the block
("coinbase_amount_map", TokenBalanceMap),
("shard_stats", ShardStats),
]
def __init__(
self,
minor_block_header,
tx_count,
x_shard_tx_count,
coinbase_amount_map,
shard_stats,
):
self.minor_block_header = minor_block_header
self.tx_count = tx_count
self.x_shard_tx_count = x_shard_tx_count
self.coinbase_amount_map = coinbase_amount_map
self.shard_stats = shard_stats
class AddMinorBlockHeaderResponse(Serializable):
FIELDS = [("error_code", uint32), ("artificial_tx_config", ArtificialTxConfig)]
def __init__(self, error_code, artificial_tx_config):
self.error_code = error_code
self.artificial_tx_config = artificial_tx_config
class AddMinorBlockHeaderListRequest(Serializable):
""" Notify master about a list of successfully added minor block.
Mostly used for minor block sync triggered by root block sync
"""
FIELDS = [
("minor_block_header_list", PrependedSizeListSerializer(4, MinorBlockHeader)),
("coinbase_amount_map_list", PrependedSizeListSerializer(4, TokenBalanceMap)),
]
def __init__(self, minor_block_header_list, coinbase_amount_map_list):
self.minor_block_header_list = minor_block_header_list
self.coinbase_amount_map_list = coinbase_amount_map_list
class AddMinorBlockHeaderListResponse(Serializable):
FIELDS = [("error_code", uint32)]
def __init__(self, error_code):
self.error_code = error_code
# slave -> slave
class AddXshardTxListRequest(Serializable):
FIELDS = [
("branch", Branch),
("minor_block_hash", hash256),
("tx_list", CrossShardTransactionList),
]
def __init__(self, branch, minor_block_hash, tx_list):
self.branch = branch
self.minor_block_hash = minor_block_hash
self.tx_list = tx_list
class AddXshardTxListResponse(Serializable):
FIELDS = [("error_code", uint32)]
def __init__(self, error_code):
self.error_code = error_code
class BatchAddXshardTxListRequest(Serializable):
FIELDS = [
(
"add_xshard_tx_list_request_list",
PrependedSizeListSerializer(4, AddXshardTxListRequest),
)
]
def __init__(self, add_xshard_tx_list_request_list):
self.add_xshard_tx_list_request_list = add_xshard_tx_list_request_list
class BatchAddXshardTxListResponse(Serializable):
FIELDS = [("error_code", uint32)]
def __init__(self, error_code):
self.error_code = error_code
class GetLogRequest(Serializable):
FIELDS = [
("branch", Branch),
("addresses", PrependedSizeListSerializer(4, Address)),
(
"topics",
PrependedSizeListSerializer(
4, PrependedSizeListSerializer(4, FixedSizeBytesSerializer(32))
),
),
("start_block", uint64),
("end_block", uint64),
]
def __init__(
self,
branch: Branch,
addresses: List[Address],
topics: List[List[bytes]],
start_block: int,
end_block: int,
):
self.branch = branch
self.addresses = addresses
self.topics = topics
self.start_block = start_block
self.end_block = end_block
class GetLogResponse(Serializable):
FIELDS = [("error_code", uint32), ("logs", PrependedSizeListSerializer(4, Log))]
def __init__(self, error_code: int, logs: List[Log]):
self.error_code = error_code
self.logs = logs
class EstimateGasRequest(Serializable):
FIELDS = [("tx", TypedTransaction), ("from_address", Address)]
def __init__(self, tx: TypedTransaction, from_address: Address):
self.tx = tx
self.from_address = from_address
class EstimateGasResponse(Serializable):
FIELDS = [("error_code", uint32), ("result", uint32)]
def __init__(self, error_code: int, result: int):
self.error_code = error_code
self.result = result
class GetStorageRequest(Serializable):
FIELDS = [
("address", Address),
("key", uint256),
("block_height", Optional(uint64)),
]
def __init__(
self, address: Address, key: int, block_height: typing.Optional[int] = None
):
self.address = address
self.key = key
self.block_height = block_height
class GetStorageResponse(Serializable):
FIELDS = [("error_code", uint32), ("result", FixedSizeBytesSerializer(32))]
def __init__(self, error_code: int, result: bytes):
self.error_code = error_code
self.result = result
class GetCodeRequest(Serializable):
FIELDS = [("address", Address), ("block_height", Optional(uint64))]
def __init__(self, address: Address, block_height: typing.Optional[int] = None):
self.address = address
self.block_height = block_height
class GetCodeResponse(Serializable):
FIELDS = [("error_code", uint32), ("result", PrependedSizeBytesSerializer(4))]
def __init__(self, error_code: int, result: bytes):
self.error_code = error_code
self.result = result
class GasPriceRequest(Serializable):
FIELDS = [("branch", Branch), ("token_id", uint64)]
def __init__(self, branch: Branch, token_id: int):
self.branch = branch
self.token_id = token_id
class GasPriceResponse(Serializable):
FIELDS = [("error_code", uint32), ("result", uint64)]
def __init__(self, error_code: int, result: int):
self.error_code = error_code
self.result = result
class GetWorkRequest(Serializable):
FIELDS = [("branch", Branch), ("coinbase_addr", Optional(Address))]
def __init__(self, branch: Branch, coinbase_addr: typing.Optional[Address]):
self.branch = branch
self.coinbase_addr = coinbase_addr
class GetWorkResponse(Serializable):
FIELDS = [
("error_code", uint32),
("header_hash", hash256),
("height", uint64),
("difficulty", biguint),
]
def __init__(
self,
error_code: int,
header_hash: bytes = bytes(Constant.HASH_LENGTH),
height: int = 0,
difficulty: int = 0,
):
self.error_code = error_code
self.header_hash = header_hash
self.height = height
self.difficulty = difficulty
class SubmitWorkRequest(Serializable):
FIELDS = [
("branch", Branch),
("header_hash", hash256),
("nonce", uint64),
("mixhash", hash256),
("signature", Optional(signature65)),
]
def __init__(
self,
branch: Branch,
header_hash: bytes,
nonce: int,
mixhash: bytes,
signature: bytes,
):
self.branch = branch
self.header_hash = header_hash
self.nonce = nonce
self.mixhash = mixhash
self.signature = signature
class SubmitWorkResponse(Serializable):
FIELDS = [("error_code", uint32), ("success", boolean)]
def __init__(self, error_code: int, success: bool):
self.error_code = error_code
self.success = success
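# --- Illustrative sketch (not part of the original module) -------------------
# GetWork*/SubmitWork* form a getwork-style mining round trip: the caller asks
# for a sealable header hash plus its height and difficulty, an external miner
# searches for a nonce/mixhash, and the sealed result is submitted back.  The
# commented lines below only show how the message fields line up; `some_branch`
# and the nonce/mixhash values are made-up placeholders, not real protocol data.
#
# work_req = GetWorkRequest(branch=some_branch, coinbase_addr=None)
# work = GetWorkResponse(error_code=0,
#                        header_hash=bytes(Constant.HASH_LENGTH),
#                        height=42,
#                        difficulty=1000)
# submit = SubmitWorkRequest(branch=some_branch,
#                            header_hash=work.header_hash,
#                            nonce=123456,
#                            mixhash=bytes(Constant.HASH_LENGTH),
#                            signature=None)
# reply = SubmitWorkResponse(error_code=0, success=True)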
class GetRootChainStakesRequest(Serializable):
FIELDS = [("address", Address), ("minor_block_hash", hash256)]
def __init__(self, address: Address, minor_block_hash: bytes):
self.address = address
self.minor_block_hash = minor_block_hash
class GetRootChainStakesResponse(Serializable):
FIELDS = [
("error_code", uint32),
("stakes", biguint),
("signer", FixedSizeBytesSerializer(20)),
]
def __init__(self, error_code: int, stakes: int = 0, signer: bytes = bytes(20)):
self.error_code = error_code
self.stakes = stakes
self.signer = signer
class GetTotalBalanceRequest(Serializable):
FIELDS = [
("branch", Branch),
("start", Optional(hash256)),
("token_id", uint64), # TODO: double check max token ID
("limit", uint32),
("minor_block_hash", hash256),
("root_block_hash", Optional(hash256)),
]
def __init__(
self,
branch: Branch,
start: typing.Optional[bytes],
token_id: int,
limit: int,
minor_block_hash: bytes,
root_block_hash: typing.Optional[bytes],
):
self.branch = branch
self.start = start
self.token_id = token_id
self.limit = limit
self.minor_block_hash = minor_block_hash
self.root_block_hash = root_block_hash
class GetTotalBalanceResponse(Serializable):
FIELDS = [
("error_code", uint32),
("total_balance", biguint),
("next", PrependedSizeBytesSerializer(4)),
]
def __init__(self, error_code: int, total_balance: int, next: bytes):
self.error_code = error_code
self.total_balance = total_balance
self.next = next
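# --- Illustrative sketch (not part of the original module) -------------------
# GetTotalBalance* looks like cursor-style pagination: `start` is an optional
# resume cursor, `limit` caps the accounts scanned per call, and the response's
# `next` bytes are fed back as the following request's `start`.  The exact
# paging semantics are inferred from the field names, and `send_rpc` below is a
# hypothetical transport helper, so treat this purely as a sketch.
#
# cursor, grand_total = None, 0
# while True:
#     req = GetTotalBalanceRequest(branch, cursor, token_id, limit,
#                                  minor_block_hash, root_block_hash)
#     resp = send_rpc(req)            # hypothetical request/response round trip
#     grand_total += resp.total_balance
#     if not resp.next:
#         break
#     cursor = resp.next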
CLUSTER_OP_BASE = 128
class ClusterOp:
    # TODO: Remove cluster op base as cluster op should be independent of p2p op
PING = 1 + CLUSTER_OP_BASE
PONG = 2 + CLUSTER_OP_BASE
CONNECT_TO_SLAVES_REQUEST = 3 + CLUSTER_OP_BASE
CONNECT_TO_SLAVES_RESPONSE = 4 + CLUSTER_OP_BASE
ADD_ROOT_BLOCK_REQUEST = 5 + CLUSTER_OP_BASE
ADD_ROOT_BLOCK_RESPONSE = 6 + CLUSTER_OP_BASE
GET_ECO_INFO_LIST_REQUEST = 7 + CLUSTER_OP_BASE
GET_ECO_INFO_LIST_RESPONSE = 8 + CLUSTER_OP_BASE
GET_NEXT_BLOCK_TO_MINE_REQUEST = 9 + CLUSTER_OP_BASE
GET_NEXT_BLOCK_TO_MINE_RESPONSE = 10 + CLUSTER_OP_BASE
GET_UNCONFIRMED_HEADERS_REQUEST = 11 + CLUSTER_OP_BASE
GET_UNCONFIRMED_HEADERS_RESPONSE = 12 + CLUSTER_OP_BASE
GET_ACCOUNT_DATA_REQUEST = 13 + CLUSTER_OP_BASE
GET_ACCOUNT_DATA_RESPONSE = 14 + CLUSTER_OP_BASE
ADD_TRANSACTION_REQUEST = 15 + CLUSTER_OP_BASE
ADD_TRANSACTION_RESPONSE = 16 + CLUSTER_OP_BASE
ADD_MINOR_BLOCK_HEADER_REQUEST = 17 + CLUSTER_OP_BASE
ADD_MINOR_BLOCK_HEADER_RESPONSE = 18 + CLUSTER_OP_BASE
ADD_XSHARD_TX_LIST_REQUEST = 19 + CLUSTER_OP_BASE
ADD_XSHARD_TX_LIST_RESPONSE = 20 + CLUSTER_OP_BASE
SYNC_MINOR_BLOCK_LIST_REQUEST = 21 + CLUSTER_OP_BASE
SYNC_MINOR_BLOCK_LIST_RESPONSE = 22 + CLUSTER_OP_BASE
ADD_MINOR_BLOCK_REQUEST = 23 + CLUSTER_OP_BASE
ADD_MINOR_BLOCK_RESPONSE = 24 + CLUSTER_OP_BASE
CREATE_CLUSTER_PEER_CONNECTION_REQUEST = 25 + CLUSTER_OP_BASE
CREATE_CLUSTER_PEER_CONNECTION_RESPONSE = 26 + CLUSTER_OP_BASE
DESTROY_CLUSTER_PEER_CONNECTION_COMMAND = 27 + CLUSTER_OP_BASE
GET_MINOR_BLOCK_REQUEST = 29 + CLUSTER_OP_BASE
GET_MINOR_BLOCK_RESPONSE = 30 + CLUSTER_OP_BASE
GET_TRANSACTION_REQUEST = 31 + CLUSTER_OP_BASE
GET_TRANSACTION_RESPONSE = 32 + CLUSTER_OP_BASE
BATCH_ADD_XSHARD_TX_LIST_REQUEST = 33 + CLUSTER_OP_BASE
BATCH_ADD_XSHARD_TX_LIST_RESPONSE = 34 + CLUSTER_OP_BASE
EXECUTE_TRANSACTION_REQUEST = 35 + CLUSTER_OP_BASE
EXECUTE_TRANSACTION_RESPONSE = 36 + CLUSTER_OP_BASE
GET_TRANSACTION_RECEIPT_REQUEST = 37 + CLUSTER_OP_BASE
GET_TRANSACTION_RECEIPT_RESPONSE = 38 + CLUSTER_OP_BASE
MINE_REQUEST = 39 + CLUSTER_OP_BASE
MINE_RESPONSE = 40 + CLUSTER_OP_BASE
GEN_TX_REQUEST = 41 + CLUSTER_OP_BASE
GEN_TX_RESPONSE = 42 + CLUSTER_OP_BASE
GET_TRANSACTION_LIST_BY_ADDRESS_REQUEST = 43 + CLUSTER_OP_BASE
GET_TRANSACTION_LIST_BY_ADDRESS_RESPONSE = 44 + CLUSTER_OP_BASE
GET_LOG_REQUEST = 45 + CLUSTER_OP_BASE
GET_LOG_RESPONSE = 46 + CLUSTER_OP_BASE
ESTIMATE_GAS_REQUEST = 47 + CLUSTER_OP_BASE
ESTIMATE_GAS_RESPONSE = 48 + CLUSTER_OP_BASE
GET_STORAGE_REQUEST = 49 + CLUSTER_OP_BASE
GET_STORAGE_RESPONSE = 50 + CLUSTER_OP_BASE
GET_CODE_REQUEST = 51 + CLUSTER_OP_BASE
GET_CODE_RESPONSE = 52 + CLUSTER_OP_BASE
GAS_PRICE_REQUEST = 53 + CLUSTER_OP_BASE
GAS_PRICE_RESPONSE = 54 + CLUSTER_OP_BASE
GET_WORK_REQUEST = 55 + CLUSTER_OP_BASE
GET_WORK_RESPONSE = 56 + CLUSTER_OP_BASE
SUBMIT_WORK_REQUEST = 57 + CLUSTER_OP_BASE
SUBMIT_WORK_RESPONSE = 58 + CLUSTER_OP_BASE
ADD_MINOR_BLOCK_HEADER_LIST_REQUEST = 59 + CLUSTER_OP_BASE
ADD_MINOR_BLOCK_HEADER_LIST_RESPONSE = 60 + CLUSTER_OP_BASE
CHECK_MINOR_BLOCK_REQUEST = 61 + CLUSTER_OP_BASE
CHECK_MINOR_BLOCK_RESPONSE = 62 + CLUSTER_OP_BASE
GET_ALL_TRANSACTIONS_REQUEST = 63 + CLUSTER_OP_BASE
GET_ALL_TRANSACTIONS_RESPONSE = 64 + CLUSTER_OP_BASE
GET_ROOT_CHAIN_STAKES_REQUEST = 65 + CLUSTER_OP_BASE
GET_ROOT_CHAIN_STAKES_RESPONSE = 66 + CLUSTER_OP_BASE
GET_TOTAL_BALANCE_REQUEST = 67 + CLUSTER_OP_BASE
GET_TOTAL_BALANCE_RESPONSE = 68 + CLUSTER_OP_BASE
CLUSTER_OP_SERIALIZER_MAP = {
ClusterOp.PING: Ping,
ClusterOp.PONG: Pong,
ClusterOp.CONNECT_TO_SLAVES_REQUEST: ConnectToSlavesRequest,
ClusterOp.CONNECT_TO_SLAVES_RESPONSE: ConnectToSlavesResponse,
ClusterOp.ADD_ROOT_BLOCK_REQUEST: AddRootBlockRequest,
ClusterOp.ADD_ROOT_BLOCK_RESPONSE: AddRootBlockResponse,
ClusterOp.GET_ECO_INFO_LIST_REQUEST: GetEcoInfoListRequest,
ClusterOp.GET_ECO_INFO_LIST_RESPONSE: GetEcoInfoListResponse,
ClusterOp.GET_NEXT_BLOCK_TO_MINE_REQUEST: GetNextBlockToMineRequest,
ClusterOp.GET_NEXT_BLOCK_TO_MINE_RESPONSE: GetNextBlockToMineResponse,
ClusterOp.ADD_MINOR_BLOCK_REQUEST: AddMinorBlockRequest,
ClusterOp.ADD_MINOR_BLOCK_RESPONSE: AddMinorBlockResponse,
ClusterOp.GET_UNCONFIRMED_HEADERS_REQUEST: GetUnconfirmedHeadersRequest,
ClusterOp.GET_UNCONFIRMED_HEADERS_RESPONSE: GetUnconfirmedHeadersResponse,
ClusterOp.ADD_MINOR_BLOCK_HEADER_REQUEST: AddMinorBlockHeaderRequest,
ClusterOp.ADD_MINOR_BLOCK_HEADER_RESPONSE: AddMinorBlockHeaderResponse,
ClusterOp.ADD_XSHARD_TX_LIST_REQUEST: AddXshardTxListRequest,
ClusterOp.ADD_XSHARD_TX_LIST_RESPONSE: AddXshardTxListResponse,
ClusterOp.GET_ACCOUNT_DATA_REQUEST: GetAccountDataRequest,
ClusterOp.GET_ACCOUNT_DATA_RESPONSE: GetAccountDataResponse,
ClusterOp.ADD_TRANSACTION_REQUEST: AddTransactionRequest,
ClusterOp.ADD_TRANSACTION_RESPONSE: AddTransactionResponse,
ClusterOp.SYNC_MINOR_BLOCK_LIST_REQUEST: SyncMinorBlockListRequest,
ClusterOp.SYNC_MINOR_BLOCK_LIST_RESPONSE: SyncMinorBlockListResponse,
ClusterOp.CREATE_CLUSTER_PEER_CONNECTION_REQUEST: CreateClusterPeerConnectionRequest,
ClusterOp.CREATE_CLUSTER_PEER_CONNECTION_RESPONSE: CreateClusterPeerConnectionResponse,
ClusterOp.DESTROY_CLUSTER_PEER_CONNECTION_COMMAND: DestroyClusterPeerConnectionCommand,
ClusterOp.GET_MINOR_BLOCK_REQUEST: GetMinorBlockRequest,
ClusterOp.GET_MINOR_BLOCK_RESPONSE: GetMinorBlockResponse,
ClusterOp.GET_TRANSACTION_REQUEST: GetTransactionRequest,
ClusterOp.GET_TRANSACTION_RESPONSE: GetTransactionResponse,
ClusterOp.BATCH_ADD_XSHARD_TX_LIST_REQUEST: BatchAddXshardTxListRequest,
ClusterOp.BATCH_ADD_XSHARD_TX_LIST_RESPONSE: BatchAddXshardTxListResponse,
ClusterOp.EXECUTE_TRANSACTION_REQUEST: ExecuteTransactionRequest,
ClusterOp.EXECUTE_TRANSACTION_RESPONSE: ExecuteTransactionResponse,
ClusterOp.GET_TRANSACTION_RECEIPT_REQUEST: GetTransactionReceiptRequest,
ClusterOp.GET_TRANSACTION_RECEIPT_RESPONSE: GetTransactionReceiptResponse,
ClusterOp.MINE_REQUEST: MineRequest,
ClusterOp.MINE_RESPONSE: MineResponse,
ClusterOp.GEN_TX_REQUEST: GenTxRequest,
ClusterOp.GEN_TX_RESPONSE: GenTxResponse,
ClusterOp.GET_TRANSACTION_LIST_BY_ADDRESS_REQUEST: GetTransactionListByAddressRequest,
ClusterOp.GET_TRANSACTION_LIST_BY_ADDRESS_RESPONSE: GetTransactionListByAddressResponse,
ClusterOp.GET_LOG_REQUEST: GetLogRequest,
ClusterOp.GET_LOG_RESPONSE: GetLogResponse,
ClusterOp.ESTIMATE_GAS_REQUEST: EstimateGasRequest,
ClusterOp.ESTIMATE_GAS_RESPONSE: EstimateGasResponse,
ClusterOp.GET_STORAGE_REQUEST: GetStorageRequest,
ClusterOp.GET_STORAGE_RESPONSE: GetStorageResponse,
ClusterOp.GET_CODE_REQUEST: GetCodeRequest,
ClusterOp.GET_CODE_RESPONSE: GetCodeResponse,
ClusterOp.GAS_PRICE_REQUEST: GasPriceRequest,
ClusterOp.GAS_PRICE_RESPONSE: GasPriceResponse,
ClusterOp.GET_WORK_REQUEST: GetWorkRequest,
ClusterOp.GET_WORK_RESPONSE: GetWorkResponse,
ClusterOp.SUBMIT_WORK_REQUEST: SubmitWorkRequest,
ClusterOp.SUBMIT_WORK_RESPONSE: SubmitWorkResponse,
ClusterOp.ADD_MINOR_BLOCK_HEADER_LIST_REQUEST: AddMinorBlockHeaderListRequest,
ClusterOp.ADD_MINOR_BLOCK_HEADER_LIST_RESPONSE: AddMinorBlockHeaderListResponse,
ClusterOp.CHECK_MINOR_BLOCK_REQUEST: CheckMinorBlockRequest,
ClusterOp.CHECK_MINOR_BLOCK_RESPONSE: CheckMinorBlockResponse,
ClusterOp.GET_ALL_TRANSACTIONS_REQUEST: GetAllTransactionsRequest,
ClusterOp.GET_ALL_TRANSACTIONS_RESPONSE: GetAllTransactionsResponse,
ClusterOp.GET_ROOT_CHAIN_STAKES_REQUEST: GetRootChainStakesRequest,
ClusterOp.GET_ROOT_CHAIN_STAKES_RESPONSE: GetRootChainStakesResponse,
ClusterOp.GET_TOTAL_BALANCE_REQUEST: GetTotalBalanceRequest,
ClusterOp.GET_TOTAL_BALANCE_RESPONSE: GetTotalBalanceResponse,
}
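# --- Illustrative sketch (not part of the original module) -------------------
# CLUSTER_OP_SERIALIZER_MAP lets the RPC layer turn a raw (op, payload) frame
# back into a typed message: the op code selects the Serializable subclass that
# knows the payload's FIELDS layout.  The deserialize() call below assumes the
# usual helper on Serializable defined elsewhere in quarkchain.core, so this is
# a sketch rather than the module's actual dispatch code.
#
# def decode_cluster_message(op, payload):
#     serializer = CLUSTER_OP_SERIALIZER_MAP[op]    # e.g. ClusterOp.PING -> Ping
#     return serializer.deserialize(payload)        # assumed Serializable helper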
| 30.998302 | 92 | 0.691669 | ["MIT"] | QuarkChain/pyquarkchain | quarkchain/cluster/rpc.py | 36,516 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import marshal
from math import log, exp
from ..utils.frequency import AddOneProb
class Bayes(object):
def __init__(self):
self.d = {}
self.total = 0
def save(self, fname):
d = {}
d['total'] = self.total
d['d'] = {}
for k, v in self.d.items():
d['d'][k] = v.__dict__
if sys.version_info[0] == 3:
fname = fname + '.3'
        with open(fname, 'wb') as fout:
            marshal.dump(d, fout)
def load(self, fname):
if sys.version_info[0] == 3:
fname = fname + '.3'
        with open(fname, 'rb') as fin:
            d = marshal.load(fin)
self.total = d['total']
self.d = {}
for k, v in d['d'].items():
self.d[k] = AddOneProb()
self.d[k].__dict__ = v
def train(self, data):
for d in data:
c = d[1]
if c not in self.d:
self.d[c] = AddOneProb()
for word in d[0]:
self.d[c].add(word, 1)
self.total = sum(map(lambda x: self.d[x].getsum(), self.d.keys()))
def classify(self, x):
tmp = {}
for k in self.d:
tmp[k] = 0
for word in x:
tmp[k] += log(self.d[k].getsum()) - log(self.total)\
+ log(self.d[k].freq(word))
ret, prob = 0, 0
for k in self.d:
now = 0
for otherk in self.d:
now += exp(tmp[otherk]-tmp[k])
now = 1/now
if now > prob:
ret, prob = k, now
return (ret, prob)
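# --- Illustrative usage sketch (not part of the original module) -------------
# train() expects an iterable of (word_list, label) pairs and classify() returns
# the best label together with its estimated probability.  The toy data below is
# made up purely to show the expected shapes; it is kept commented out because
# this module uses a relative import and is not meant to be run directly.
#
# clf = Bayes()
# clf.train([(["good", "great"], "pos"), (["bad", "awful"], "neg")])
# label, prob = clf.classify(["good", "awful", "great"])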
| 26.145161 | 74 | 0.454041 | ["MIT"] | erning/snownlp | snownlp/classification/bayes.py | 1,621 | Python |
from enum import Enum
from sensors.ph_sensor import PhSensor
class SensorType(Enum):
Undefined = 0,
TDS = 1,
Ph = 2,
WaterLevel = 3,
Voltage = 4
class TriggerType(Enum):
Undefined = 0,
NutrientDose = 1,
PhDose = 2,
WaterPumpCutout = 3
class Hydriot():
    def __init__(self):
        # instance-level registries so separate Hydriot objects do not share sensors/triggers
        self.sensors = dict()
        self.triggers = dict()
def set_sensor(self, sensor_type, sensor):
self.sensors[sensor_type] = sensor
def get_sensor(self, sensor_type):
if sensor_type not in self.sensors:
return None
return self.sensors[sensor_type]
def set_trigger(self, trigger_type, trigger):
self.triggers[trigger_type] = trigger
def get_trigger(self, trigger_type):
if trigger_type not in self.triggers:
return None
return self.triggers[trigger_type]
@property
def ph_sensor(self):
return None if SensorType.Ph not in self.sensors else self.sensors[SensorType.Ph]
@property
def tds_sensor(self):
return None if SensorType.TDS not in self.sensors else self.sensors[SensorType.TDS]
@property
def water_level_sensor(self):
return None if SensorType.WaterLevel not in self.sensors else self.sensors[SensorType.WaterLevel]
@property
def voltage_sensor(self):
return None if SensorType.Voltage not in self.sensors else self.sensors[SensorType.Voltage]
@property
def nutrient_trigger(self):
return None if TriggerType.NutrientDose not in self.triggers else self.triggers[TriggerType.NutrientDose]
@property
def ph_trigger(self):
return None if TriggerType.PhDose not in self.triggers else self.triggers[TriggerType.PhDose]
@property
def water_pump_trigger(self):
        return None if TriggerType.WaterPumpCutout not in self.triggers else self.triggers[TriggerType.WaterPumpCutout]
| 29.21875 | 119 | 0.693583 | ["MIT"] | mariusvrstr/hydriot | Hydriot.PiAgent/hydriot.py | 1,870 | Python |
# -*- coding: utf-8 -*-
# (C) 2015 Muthiah Annamalai
# setup the paths
from opentamiltests import *
from solthiruthi.data_parser import *
from solthiruthi.solthiruthi import Solthiruthi
import sys
class CmdLineIO(unittest.TestCase):
    @unittest.skip("--help makes argparse exit; find a way to run this test without exiting the suite")
    def test_CLI_interface_help(self):
        _, parser = Solthiruthi.get_CLI_options(do_parse=False)
        args = parser.parse_args(["--help"])
        self.assertEqual(args.help, True)
def test_CLI_defaults(self):
_, parser = Solthiruthi.get_CLI_options(do_parse=False)
args = parser.parse_args(["-stdin"])
self.assertEqual(args.files, "")
self.assertEqual(args.help, False)
self.assertEqual(args.nalt, 10)
self.assertEqual(args.Dictionary, ["std"])
self.assertEqual(args.dialects, ["std"])
def test_CLI_files(self):
ips = ["-files", "poonga", "vanam", "sethu", "ezhuthu"]
_, parser = Solthiruthi.get_CLI_options(do_parse=False)
args = parser.parse_args(ips)
self.assertEqual(args.files, ips[1:])
def test_CLI_dicts(self):
ips = ["-Dict", "std", "wikipedia", "madurai"]
_, parser = Solthiruthi.get_CLI_options(do_parse=False)
args = parser.parse_args(ips)
self.assertEqual(args.Dictionary, ips[1:])
class DataParserTest(unittest.TestCase):
def test_worlists(self):
obj = DataParser.run(["data/maligaiporul.txt", "data/vilangugal.txt"])
r = obj.analysis()
self.assertEqual(r["catlen"], 5)
# self.assertEqual(r['total'],141)
self.assertEqual(
sorted(list(map(len, r["dict"].values()))), sorted([56, 28, 15, 8, 3])
)
if __name__ == "__main__":
unittest.main()
| 32.962963 | 82 | 0.639326 | ["MIT"] | CRE2525/open-tamil | tests/solthiruthi_data_parser.py | 1,780 | Python |
from jproperties import Properties
class ValidatorUtil:
"""
Create a validator with the property configuration
Functions :
: validate_tomcat : Validate the tomcat server configurations
: validate_property : Validate local.properties and localextensions.xml
"""
@staticmethod
def get_properties(path):
local_properties = Properties()
        with open((path if path.endswith('/') else path + '/') + 'local.properties', 'rb') as local_prop:
local_properties.load(local_prop)
return local_properties
    def __init__(self, property_url):
        self.properties = self.get_properties(property_url)
def validate_tomcat(self):
pass
def validate_property(self):
        pass
| 30 | 99 | 0.664103 | ["MIT"] | AmirthaRajan/Hybris_Data_Analytics | validator_util.py | 780 | Python |