| Column | Type | Range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |

Each record below lists these fields in this order, pipe-separated; the multi-line `content` field (Python source) appears inline between the metadata and the `authors`/`author_id` fields.
5b2c3503eb04efacd2e7919ac35ddba5250f7509 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_necklace.py | 1fac6e98b5fa8751e7cb2dad5ed8902cbbc4a46f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py |
# class header
class _NECKLACE():

    def __init__(self):
        self.name = "NECKLACE"
        self.definitions = [u'a piece of jewellery worn around the neck, such as a chain or a string of decorative stones, beads, etc.: ']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'

    def run(self, obj1=[], obj2=[]):
        return self.jsondata
| [
"[email protected]"
] | |
8f4f19a5ccafc9679fc5e0d74c6526c7dbc95e29 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/sims/university/university_constraint_helper.py | 909716771efb3ac9f35d60d052f093a8011886dc | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,186 | py |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\university\university_constraint_helper.py
# Compiled at: 2020-04-10 00:58:10
# Size of source mod 2**32: 4467 bytes
from interactions import ParticipantType
from sims4.resources import Types
from sims4.tuning.tunable import TunableEnumEntry, TunableReference, HasTunableSingletonFactory, TunableSet, AutoFactoryInit, TunableEnumWithFilter, TunableMapping
from singletons import EMPTY_SET
from tag import Tag
import services, sims4.log, sims4.resources

logger = sims4.log.Logger('UniversityConstraints', default_owner='nabaker')


class UniversityCourseReferenceSpawnPointTags(HasTunableSingletonFactory, AutoFactoryInit):
    FACTORY_TUNABLES = {'_course_slot': TunableReference(description='\n Course slot from which to pull the spawn point tags.\n ',
        manager=(services.get_instance_manager(sims4.resources.Types.CAREER)),
        class_restrictions=('UniversityCourseCareerSlot', ))}

    def get_tags(self, sim_info, interaction):
        return self._course_slot.get_spawn_point_tags(sim_info)


class UniversitySpecificSpawnPointTags(HasTunableSingletonFactory, AutoFactoryInit):
    FACTORY_TUNABLES = {'spawn_point_tags': TunableMapping(description='\n University specific classroom tags.\n ',
        key_type=TunableReference(manager=(services.get_instance_manager(Types.UNIVERSITY))),
        value_type=TunableSet(tunable=TunableEnumWithFilter(tunable_type=Tag,
            default=(Tag.INVALID),
            filter_prefixes=('Spawn', )),
            minlength=1))}

    def get_tags(self, sim_info, interaction):
        degree_tracker = sim_info.degree_tracker
        if degree_tracker is None:
            logger.error('Trying to get University Specific spawn point from sim {} with no degree tracker', sim_info)
            return EMPTY_SET
        university = degree_tracker.get_university()
        if university not in self.spawn_point_tags:
            return EMPTY_SET
        return self.spawn_point_tags[university]


class UniversityCourseCareerSISpawnPointTags(HasTunableSingletonFactory, AutoFactoryInit):

    def get_tags(self, sim_info, interaction):
        if interaction is None:
            return EMPTY_SET
        career_uid = interaction.interaction_parameters.get('career_uid')
        if career_uid is None:
            logger.error('Trying to get University Specific spawn point via career SI from invalid interaction: {}', interaction)
            return EMPTY_SET
        career = services.get_instance_manager(sims4.resources.Types.CAREER).get(career_uid)
        # Guard restored from garbled decompiler output, which emitted an
        # `or`-chained return here and left the final return unreachable.
        if career is None or not hasattr(career, 'get_spawn_point_tags'):
            return EMPTY_SET
        return career.get_spawn_point_tags(sim_info)


class UniversityCourseParticipantSpawnPointTags(HasTunableSingletonFactory, AutoFactoryInit):
    FACTORY_TUNABLES = {'participant': TunableEnumEntry(description='\n The participant from which the career ID will be obtained. \n Typically should be PickedItemId if this interaction comes via a \n CareerPickerSuperInteraction.\n ',
        tunable_type=ParticipantType,
        default=(ParticipantType.PickedItemId))}

    def get_tags(self, sim_info, interaction):
        if interaction is None:
            return EMPTY_SET
        career_uid = interaction.get_participant(self.participant)
        if career_uid is None:
            logger.error('Trying to get University Specific spawn point via invalid participant {}: {}', self.participant)
            return EMPTY_SET
        career = services.get_instance_manager(sims4.resources.Types.CAREER).get(career_uid)
        # Same decompiler artifact as above; the intended guard is restored.
        if career is None or not hasattr(career, 'get_spawn_point_tags'):
            return EMPTY_SET
        return career.get_spawn_point_tags(sim_info)
| [
"[email protected]"
] | |
c747fc3ca11e638cc89b1543712cdff9c07f6b21 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/enums/types/spending_limit_type.py | 8966869ba97c885d8bc4cfee59347982afb0ed3a | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore


__protobuf__ = proto.module(
    package='google.ads.googleads.v4.enums',
    marshal='google.ads.googleads.v4',
    manifest={
        'SpendingLimitTypeEnum',
    },
)


class SpendingLimitTypeEnum(proto.Message):
    r"""Message describing spending limit types."""

    class SpendingLimitType(proto.Enum):
        r"""The possible spending limit types used by certain resources
        as an alternative to absolute money values in micros.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        INFINITE = 2


__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
a37f3f31d2132f4be09fa414f44faa579301711c | cbf70750d6c265e4043fd9d1d3bd835662cd680f | /customer/migrations/0010_auto_20200924_1350.py | bb3021dc69a3b0803147861040ddda0bae105fac | [
"Apache-2.0"
] | permissive | xxcfun/DJANGO_CRM | c54e249a9a3da9edaeb5d9b49e852d351c7e359a | 1f8d2d7a025f9dc54b5bf498e7a577469f74c612 | refs/heads/master | 2023-01-14T05:21:54.995601 | 2020-11-27T03:23:40 | 2020-11-27T03:23:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,590 | py |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-09-24 05:50
# The Chinese verbose_name labels below describe invoice ("发票") and
# shipping ("收货") address fields: street address, district, city,
# province, town/street, recipient name and recipient phone number.
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('customer', '0009_auto_20200922_1015'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='customerinvoiceaddress',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='customershopaddress',
            name='customer',
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_address',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='发票详细地址'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_area',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票区域'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_phone',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票收货人电话'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_province',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票省份'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_town',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票街道'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_username',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票地收货人'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_address',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='收货详细地址'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_area',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货区域'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_city',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货市区'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_phone',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货收货人电话'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_province',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货省份'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_town',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货街道'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_username',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货地收货人'),
        ),
        migrations.DeleteModel(
            name='CustomerInvoiceAddress',
        ),
        migrations.DeleteModel(
            name='CustomerShopAddress',
        ),
    ]
| [
"[email protected]"
] | |
cb51fe3e4eb76ad651e9fa12d44760ebbee4a239 | 9a9e0398f26cee9864d48c4618c0a482e5475e83 | /Python/code/insert_into_a_binary_search_tree.py | 13866ad295bc26375cc3cc15a81946b087db69f6 | [] | no_license | CNife/leetcode | 92693c653bb41780ee431293286c3e909009e9b0 | 7cdd61692ecb52dd1613169e80b924dd39d35996 | refs/heads/main | 2021-06-22T21:22:12.997253 | 2021-03-18T07:07:15 | 2021-03-18T07:07:15 | 206,955,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py |
from leetcode import TreeNode, test, new_tree, inorder_traverse
def insert_into_bst(root: TreeNode, val: int) -> TreeNode:
    # Walk down the tree, remembering the last node visited and which side
    # the new value should attach to.
    node, prev, is_left = root, None, False
    while node:
        prev = node
        if val < node.val:
            node, is_left = node.left, True
        else:
            node, is_left = node.right, False
    new_node = TreeNode(val)
    if prev is None:
        return new_node
    if is_left:
        prev.left = new_node
    else:
        prev.right = new_node
    return root


test(
    insert_into_bst,
    [(new_tree(4, 2, 7, 1, 3), 5, [1, 2, 3, 4, 5, 7]), (new_tree(), 1, [1])],
    equals_func=lambda actual, expect: inorder_traverse(actual) == expect,
)
| [
"[email protected]"
] | |
ffc282cd0dd6bde3ffe884d9dae29cfcd248d22c | cf99f0dfd2ae3a50ac4dfe95dddd74d2308e7fd4 | /src/scalbo/scalbo/benchmark/dhb_navalpropulsion.py | 63e0681177c8fb2d58959ea10846ea3e91845071 | [
"BSD-2-Clause"
] | permissive | deephyper/scalable-bo | 33923598181799410b790addcaf4ea799b276444 | 44f0afc28a19213252b59868f76a8f6918f8aabc | refs/heads/main | 2023-07-28T10:33:52.291460 | 2023-07-20T09:44:50 | 2023-07-20T09:44:50 | 464,852,027 | 2 | 2 | BSD-2-Clause | 2022-10-18T12:15:19 | 2022-03-01T10:43:47 | Jupyter Notebook | UTF-8 | Python | false | false | 233 | py |
import os
os.environ["DEEPHYPER_BENCHMARK_TASK"] = "navalpropulsion"
import deephyper_benchmark as dhb
dhb.load("HPOBench/tabular")
from deephyper_benchmark.lib.hpobench.tabular import hpo
hp_problem = hpo.problem
run = hpo.run
| [
"[email protected]"
] | |
b7e437e89e358ec335a6332b2e9fd513a60a9b1f | 59fb17c240b261040026d713a6ac9c97d6a9f265 | /gym/gym/envs/robotics/hand_env.py | 2e9d2cf735797ab4dc19d54edb06d0a5c2b45936 | [
"MIT"
] | permissive | dmeger/TeachingImitation | 3fb97499e76929959913266f127154f6ae5a8e99 | 5f4dba7e49987924c3d55cd27579cad4c71ef7a4 | refs/heads/master | 2023-03-28T13:25:01.307382 | 2021-04-06T15:07:08 | 2021-04-06T15:07:08 | 355,223,500 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,094 | py |
import os
import copy
import numpy as np

import gym
from gym import error, spaces
from gym.utils import seeding
from gym.envs.robotics import robot_env


class HandEnv(robot_env.RobotEnv):
    def __init__(self, model_path, n_substeps, initial_qpos, relative_control):
        self.relative_control = relative_control

        super(HandEnv, self).__init__(
            model_path=model_path, n_substeps=n_substeps, n_actions=20,
            initial_qpos=initial_qpos)

    # RobotEnv methods
    # ----------------------------

    def _set_action(self, action):
        assert action.shape == (20,)

        ctrlrange = self.sim.model.actuator_ctrlrange
        actuation_range = (ctrlrange[:, 1] - ctrlrange[:, 0]) / 2.
        if self.relative_control:
            actuation_center = np.zeros_like(action)
            for i in range(self.sim.data.ctrl.shape[0]):
                actuation_center[i] = self.sim.data.get_joint_qpos(
                    self.sim.model.actuator_names[i].replace(':A_', ':'))
            for joint_name in ['FF', 'MF', 'RF', 'LF']:
                act_idx = self.sim.model.actuator_name2id(
                    'robot0:A_{}J1'.format(joint_name))
                actuation_center[act_idx] += self.sim.data.get_joint_qpos(
                    'robot0:{}J0'.format(joint_name))
        else:
            actuation_center = (ctrlrange[:, 1] + ctrlrange[:, 0]) / 2.
        self.sim.data.ctrl[:] = actuation_center + action * actuation_range
        self.sim.data.ctrl[:] = np.clip(self.sim.data.ctrl, ctrlrange[:, 0], ctrlrange[:, 1])

    def _viewer_setup(self):
        body_id = self.sim.model.body_name2id('robot0:palm')
        lookat = self.sim.data.body_xpos[body_id]
        for idx, value in enumerate(lookat):
            self.viewer.cam.lookat[idx] = value
        self.viewer.cam.distance = 0.5
        self.viewer.cam.azimuth = 55.
        self.viewer.cam.elevation = -25.

    def render(self, mode='human', width=500, height=500):
        return super(HandEnv, self).render(mode, width, height)
| [
"[email protected]"
] | |
64d756f30ed1ff762c711dbcf176d49456583621 | f7ec77ac1ba0a425e015c44a87f5f3b7cff96fdd | /custom_components/wyzeapi/climate.py | e9ab890003e453f7143b17da91cce4e85291b90b | [] | no_license | arsaboo/homeassistant-config | 099f47e14bac02c135d1c050c64c889e5c767ebe | 949a0ef28911f0bd3832e61365e5b58731915b4b | refs/heads/master | 2023-01-25T01:49:04.812101 | 2022-12-14T23:54:51 | 2022-12-14T23:54:51 | 70,074,515 | 1,831 | 562 | null | 2020-04-23T05:34:04 | 2016-10-05T15:40:46 | Python | UTF-8 | Python | false | false | 10,954 | py | """Platform for light integration."""
import logging
# Import the device class from the component that you want to support
from datetime import timedelta
from typing import List, Optional, Callable, Any

from homeassistant.components.climate import (
    ClimateEntity,
    SUPPORT_TARGET_TEMPERATURE_RANGE,
    SUPPORT_FAN_MODE
)
from homeassistant.components.climate.const import (
    HVAC_MODE_AUTO,
    HVAC_MODE_HEAT,
    HVAC_MODE_COOL,
    HVAC_MODE_OFF,
    FAN_AUTO,
    FAN_ON,
    PRESET_HOME,
    PRESET_AWAY,
    PRESET_SLEEP,
    CURRENT_HVAC_IDLE,
    CURRENT_HVAC_HEAT,
    CURRENT_HVAC_COOL,
    CURRENT_HVAC_OFF
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, TEMP_FAHRENHEIT, TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from wyzeapy import Wyzeapy, ThermostatService
from wyzeapy.services.thermostat_service import Thermostat, TemperatureUnit, HVACMode, Preset, FanMode, HVACState

from .token_manager import token_exception_handler
from .const import DOMAIN, CONF_CLIENT

_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Wyze"
SCAN_INTERVAL = timedelta(seconds=30)


@token_exception_handler
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry,
                            async_add_entities: Callable[[List[Any], bool], None]):
    """
    This function sets up the config entry so that it is available to Home Assistant

    :param hass: The Home Assistant instance
    :param config_entry: The current config entry
    :param async_add_entities: A function to add entities
    :return:
    """

    _LOGGER.debug("""Creating new WyzeApi thermostat component""")
    client: Wyzeapy = hass.data[DOMAIN][config_entry.entry_id][CONF_CLIENT]

    thermostat_service = await client.thermostat_service
    thermostats = [WyzeThermostat(thermostat_service, thermostat) for thermostat in
                   await thermostat_service.get_thermostats()]

    async_add_entities(thermostats, True)


class WyzeThermostat(ClimateEntity):
    """
    This class defines a representation of a Wyze Thermostat that can be used for Home Assistant
    """
    # pylint: disable=R0902
    # Set right after a value is pushed to the device so that the next scheduled
    # update does not overwrite the optimistic local state with stale data.
    _server_out_of_sync = False

    def __init__(self, thermostat_service: ThermostatService, thermostat: Thermostat):
        self._thermostat_service = thermostat_service
        self._thermostat = thermostat

    def set_temperature(self, **kwargs) -> None:
        raise NotImplementedError

    def set_humidity(self, humidity: int) -> None:
        raise NotImplementedError

    def set_fan_mode(self, fan_mode: str) -> None:
        raise NotImplementedError

    def set_hvac_mode(self, hvac_mode: str) -> None:
        raise NotImplementedError

    def set_swing_mode(self, swing_mode: str) -> None:
        raise NotImplementedError

    def set_preset_mode(self, preset_mode: str) -> None:
        raise NotImplementedError

    def turn_aux_heat_on(self) -> None:
        raise NotImplementedError

    def turn_aux_heat_off(self) -> None:
        raise NotImplementedError

    @property
    def current_temperature(self) -> float:
        return self._thermostat.temperature

    @property
    def current_humidity(self) -> Optional[int]:
        return self._thermostat.humidity

    @property
    def temperature_unit(self) -> str:
        if self._thermostat.temp_unit == TemperatureUnit.FAHRENHEIT:
            return TEMP_FAHRENHEIT
        return TEMP_CELSIUS

    @property
    def hvac_mode(self) -> str:
        # pylint: disable=R1705
        if self._thermostat.hvac_mode == HVACMode.AUTO:
            return HVAC_MODE_AUTO
        elif self._thermostat.hvac_mode == HVACMode.HEAT:
            return HVAC_MODE_HEAT
        elif self._thermostat.hvac_mode == HVACMode.COOL:
            return HVAC_MODE_COOL
        else:
            return HVAC_MODE_OFF

    @property
    def hvac_modes(self) -> List[str]:
        return [HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_OFF]

    @property
    def target_temperature_high(self) -> Optional[float]:
        return self._thermostat.cool_set_point

    @property
    def target_temperature_low(self) -> Optional[float]:
        return self._thermostat.heat_set_point

    @property
    def preset_mode(self) -> Optional[str]:
        raise NotImplementedError

    @property
    def preset_modes(self) -> Optional[List[str]]:
        raise NotImplementedError

    @property
    def is_aux_heat(self) -> Optional[bool]:
        raise NotImplementedError

    @property
    def fan_mode(self) -> Optional[str]:
        if self._thermostat.fan_mode == FanMode.AUTO:
            return FAN_AUTO
        else:
            return FAN_ON

    @property
    def fan_modes(self) -> Optional[List[str]]:
        return [FAN_AUTO, FAN_ON]

    @property
    def swing_mode(self) -> Optional[str]:
        raise NotImplementedError

    @property
    def swing_modes(self) -> Optional[str]:
        raise NotImplementedError

    @property
    def hvac_action(self) -> str:
        # pylint: disable=R1705
        if self._thermostat.hvac_state == HVACState.IDLE:
            return CURRENT_HVAC_IDLE
        elif self._thermostat.hvac_state == HVACState.HEATING:
            return CURRENT_HVAC_HEAT
        elif self._thermostat.hvac_state == HVACState.COOLING:
            return CURRENT_HVAC_COOL
        else:
            return CURRENT_HVAC_OFF

    @token_exception_handler
    async def async_set_temperature(self, **kwargs) -> None:
        target_temp_low = kwargs['target_temp_low']
        target_temp_high = kwargs['target_temp_high']

        if target_temp_low != self._thermostat.heat_set_point:
            await self._thermostat_service.set_heat_point(self._thermostat, int(target_temp_low))
            self._thermostat.heat_set_point = int(target_temp_low)
        if target_temp_high != self._thermostat.cool_set_point:
            await self._thermostat_service.set_cool_point(self._thermostat, int(target_temp_high))
            self._thermostat.cool_set_point = int(target_temp_high)

        self._server_out_of_sync = True
        self.async_schedule_update_ha_state()

    async def async_set_humidity(self, humidity: int) -> None:
        raise NotImplementedError

    @token_exception_handler
    async def async_set_fan_mode(self, fan_mode: str) -> None:
        if fan_mode == FAN_ON:
            await self._thermostat_service.set_fan_mode(self._thermostat, FanMode.ON)
            self._thermostat.fan_mode = FanMode.ON
        elif fan_mode == FAN_AUTO:
            await self._thermostat_service.set_fan_mode(self._thermostat, FanMode.AUTO)
            self._thermostat.fan_mode = FanMode.AUTO

        self._server_out_of_sync = True
        self.async_schedule_update_ha_state()

    @token_exception_handler
    async def async_set_hvac_mode(self, hvac_mode: str) -> None:
        if hvac_mode == HVAC_MODE_OFF:
            await self._thermostat_service.set_hvac_mode(self._thermostat, HVACMode.OFF)
            self._thermostat.hvac_mode = HVACMode.OFF
        elif hvac_mode == HVAC_MODE_HEAT:
            await self._thermostat_service.set_hvac_mode(self._thermostat, HVACMode.HEAT)
            self._thermostat.hvac_mode = HVACMode.HEAT
        elif hvac_mode == HVAC_MODE_COOL:
            await self._thermostat_service.set_hvac_mode(self._thermostat, HVACMode.COOL)
            self._thermostat.hvac_mode = HVACMode.COOL
        elif hvac_mode == HVAC_MODE_AUTO:
            await self._thermostat_service.set_hvac_mode(self._thermostat, HVACMode.AUTO)
            self._thermostat.hvac_mode = HVACMode.AUTO

        self._server_out_of_sync = True
        self.async_schedule_update_ha_state()

    async def async_set_swing_mode(self, swing_mode: str) -> None:
        raise NotImplementedError

    @token_exception_handler
    async def async_set_preset_mode(self, preset_mode: str) -> None:
        if preset_mode == PRESET_SLEEP:
            await self._thermostat_service.set_preset(self._thermostat, Preset.SLEEP)
            self._thermostat.preset = Preset.SLEEP
        elif preset_mode == PRESET_AWAY:
            await self._thermostat_service.set_preset(self._thermostat, Preset.AWAY)
            self._thermostat.preset = Preset.AWAY
        elif preset_mode == PRESET_HOME:
            await self._thermostat_service.set_preset(self._thermostat, Preset.HOME)
            self._thermostat.preset = Preset.HOME

        self._server_out_of_sync = True
        self.async_schedule_update_ha_state()

    async def async_turn_aux_heat_on(self) -> None:
        raise NotImplementedError

    async def async_turn_aux_heat_off(self) -> None:
        raise NotImplementedError

    @property
    def supported_features(self) -> int:
        return SUPPORT_TARGET_TEMPERATURE_RANGE | SUPPORT_FAN_MODE

    @property
    def device_info(self) -> dict:
        return {
            "identifiers": {
                (DOMAIN, self._thermostat.mac)
            },
            "name": self.name,
            "manufacturer": "WyzeLabs",
            "model": self._thermostat.product_model
        }

    @property
    def should_poll(self) -> bool:
        return False

    @property
    def name(self) -> str:
        """Return the display name of this thermostat."""
        return self._thermostat.nickname

    @property
    def unique_id(self) -> str:
        return self._thermostat.mac

    @property
    def available(self) -> bool:
        """Return the connection status of this thermostat"""
        return self._thermostat.available

    @property
    def extra_state_attributes(self):
        """Return device attributes of the entity."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            "state": self.state,
            "available": self.available,
            "device_model": self._thermostat.product_model,
            "mac": self.unique_id
        }

    @token_exception_handler
    async def async_update(self) -> None:
        """
        This function updates the state of the Thermostat

        :return: None
        """
        if not self._server_out_of_sync:
            self._thermostat = await self._thermostat_service.update(self._thermostat)
        else:
            self._server_out_of_sync = False

    @callback
    def async_update_callback(self, thermostat: Thermostat):
        """Update the thermostat's state."""
        self._thermostat = thermostat
        self.async_schedule_update_ha_state()

    async def async_added_to_hass(self) -> None:
        """Subscribe to update events."""
        self._thermostat.callback_function = self.async_update_callback
        self._thermostat_service.register_updater(self._thermostat, 30)
        await self._thermostat_service.start_update_manager()
        return await super().async_added_to_hass()

    async def async_will_remove_from_hass(self) -> None:
        self._thermostat_service.unregister_updater()
| [
"[email protected]"
] | |
21829bcc8abaacf0d9312bc98db461f1459f05bf | 64ec8731553aa08c33373b212bbe431b1a23b97c | /docs/source/examples/geochem/lambdas.py | 9633ff7db7d91920bbbc02759569103e076723dd | [
"BSD-3-Clause",
"MIT"
] | permissive | ChetanNathwani/pyrolite | 98947fde265b25beea839f24495d68bbdb726eed | 8de9c67855305115517418e127bf26de84ff062d | refs/heads/master | 2023-07-26T18:57:28.024540 | 2021-07-08T09:19:02 | 2021-07-08T09:19:02 | 367,300,779 | 0 | 0 | NOASSERTION | 2021-05-14T09:23:47 | 2021-05-14T08:35:50 | null | UTF-8 | Python | false | false | 6,232 | py |
"""
lambdas: Parameterising REE Profiles
=====================================
Orthogonal polynomial decomposition can be used for dimensional reduction of a smooth
function over an independent variable, producing an array of independent values
representing the relative weights for each order of component polynomial. This is an
effective method to parameterise and compare the nature of smooth profiles.

In geochemistry, the most applicable use case is the reduction of Rare Earth Element (REE)
profiles. The REE are a collection of elements with broadly similar physicochemical
properties (the lanthanides), which vary with ionic radii. Given their similar behaviour
and typically smooth variation of normalised abundance with ionic radius, REE profiles
and their shapes can be effectively parameterised and dimensionally reduced (14 elements
summarised by 3-4 shape parameters).
Here we generate some example data, reduce these to lambda values, and visualise the
results.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pyrolite.plot
# sphinx_gallery_thumbnail_number = 2
np.random.seed(82)
########################################################################################
# First we'll generate some example synthetic data based around a Depleted MORB Mantle (DMM) composition:
#
from pyrolite.util.synthetic import example_spider_data
df = example_spider_data(
    noise_level=0.05,
    nobs=100,
    start="DMM_WH2005",
    norm_to="Chondrite_PON",
    offsets={"Eu": 0.2},
)
########################################################################################
# Let's have a quick look at what this REE data looks like:
#
df.pyroplot.REE(alpha=0.05, c="k", unity_line=True)
plt.show()
########################################################################################
# From this REE data we can fit a series of orthogonal polynomials, and subsequently use
# the regression coefficients ('lambdas') as a parameterisation of the REE
# pattern/profile. This example data is already normalised to Chondrite, so to avoid
# double-normalising, we pass :code:`norm_to=None`:
#
ls = df.pyrochem.lambda_lnREE(degree=4, norm_to=None)
########################################################################################
# So what's actually happening here? To get some idea of what these λ coefficients
# correspond to, we can pull this process apart and visualise our REE profiles as
# the sum of the series of orthogonal polynomial components of increasing order.
# As lambdas represent the coefficients for the regression of log-transformed normalised
# data, we'll first need to take the logarithm.
#
# With our data, we've then fit a function of ionic radius with the form
# :math:`f(r) = \lambda_0 + \lambda_1 f_1 + \lambda_2 f_2 + \lambda_3 f_3...`
# where the polynomial components of increasing order are :math:`f_1 = (r - \beta_0)`,
# :math:`f_2 = (r - \gamma_0)(r - \gamma_1)`,
# :math:`f_3 = (r - \delta_0)(r - \delta_1)(r - \delta_2)` and so on. The parameters
# :math:`\beta`, :math:`\gamma`, :math:`\delta` are pre-computed such that the
# polynomial components are indeed independent. Here we can visualise how these
# polynomial components are summed to produce the regressed profile, using the last REE
# profile we generated above as an example:
#
from pyrolite.util.lambdas import plot_lambdas_components
ax = df.iloc[-1, :].apply(np.log).pyroplot.REE(color="k", label="Data", logy=False)
plot_lambdas_components(ls.iloc[-1, :], ax=ax)
ax.legend(frameon=False, facecolor=None, bbox_to_anchor=(1, 1))
plt.show()
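########################################################################################
# As an aside, the regression idea itself can be sketched in a few lines of plain
# numpy (an illustrative sketch, not pyrolite's implementation): least squares of a
# log-profile against polynomials in ionic radius. Plain monomials stand in for the
# orthogonalised basis here, so the fitted curve is equivalent but the raw
# coefficients are not directly comparable to the lambdas above; the radii and
# profile values below are placeholders.
#
r = np.linspace(1.16, 0.98, 14)                    # placeholder ionic radii (LREE -> HREE)
y = 2.0 - 1.5 * (r - r.mean())                     # a toy smooth log-profile
X = np.vander(r - r.mean(), N=4, increasing=True)  # columns: 1, r', r'^2, r'^3
beta = np.linalg.lstsq(X, y, rcond=None)[0]        # analogues of [λ0, λ1, λ2, λ3]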
########################################################################################
# Note that we've not used Eu in this regression - Eu anomalies are a deviation from
# the 'smooth profile' that this method requires. Bear this in mind if your data
# also exhibits significant Ce anomalies; you may need to exclude those data too.
#
# Now that we've gone through a brief introduction to how the lambdas are generated,
# let's quickly check what the coefficient values themselves look like:
#
fig, ax = plt.subplots(1, 3, figsize=(9, 3))
for ix in range(ls.columns.size - 1):
    ls[ls.columns[ix : ix + 2]].pyroplot.scatter(ax=ax[ix], alpha=0.1, c="k")
plt.tight_layout()
########################################################################################
# But what do these parameters correspond to? From the deconstructed orthogonal
# polynomial above, we can see that :math:`\lambda_0` parameterises relative enrichment
# (this is the mean value of the logarithm of Chondrite-normalised REE abundances),
# :math:`\lambda_1` parameterises a linear slope (here, LREE enrichment), and higher
# order terms describe curvature of the REE pattern. Through this parameterisation,
# the REE profile can be effectively described and directly linked to geochemical
# processes. While the amount of data we need to describe the patterns is lessened,
# the values themselves are more meaningful and readily used to describe the profiles
# and their physical significance.
#
# The visualisation of :math:`\lambda_1`-:math:`\lambda_2` can be particularly useful
# where you're trying to compare REE profiles.
#
# We've used a synthetic dataset here which is by design approximately normally
# distributed, so the values themselves here are not particularly revealing,
# but they do illustrate the expected magnitudes of values for each of the parameters.
#
# For more on using orthogonal polynomials to describe geochemical pattern data, dig
# into the paper which introduced the method to geochemists:
# O’Neill, H.S.C., 2016. The Smoothness and Shapes of Chondrite-normalized Rare Earth
# Element Patterns in Basalts. J Petrology 57, 1463–1508.
# `doi: 10.1093/petrology/egw047 <https://doi.org/10.1093/petrology/egw047>`__.
#
# .. seealso::
#
# Examples:
# `Ionic Radii <ionic_radii.html>`__,
# `REE Radii Plot <../plotting/REE_radii_plot.html>`__
#
# Functions:
# :func:`~pyrolite.geochem.pyrochem.lambda_lnREE`,
# :func:`~pyrolite.geochem.ind.get_ionic_radii`,
# :func:`pyrolite.plot.pyroplot.REE`
#
| [
"[email protected]"
] | |
597dfba8a0d880a25771aa66784fcd726a4e6cde | 952ef35ba682695a91c21f6dff8b2f0589e1fbb5 | /bemoss_lib/platform/commandProcessor.py | f511e3758dcff529b0f0d0902ecfca09d56b872b | [
"BSD-2-Clause-Views"
] | permissive | ajfar-bem/wisebldg | 8d4025dfea38829ea008e0dacb8a37362ff7098a | 0cb8ef7c5984cbb5cc86e40780fdf4e14e5bda05 | refs/heads/master | 2023-04-11T02:59:23.227173 | 2020-10-19T16:18:35 | 2020-10-19T16:18:35 | 359,859,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,694 | py |
import datetime
from threading import Thread, current_thread
from multiprocessing import Process, Queue, Value
from platformData import *
import importlib
global processes_dict, archived_process_list
import threading
from BEMOSSThread import BThread, BProcess
from multiprocessing.reduction import reduce_connection
import multiprocessing
import os

agentDict = {
    "basicagent": "Agents.basicAgent.BasicAgent",
    "devicediscoveryagent": "Agents.deviceDiscoveryAgent.DiscoveryAgent",
    "multinodeagent": "Agents.multiNodeAgent.MultiNodeAgent",
    "networkagent": "Agents.networkAgent.NetworkAgent",
    "approvalhelperagent": "Agents.approvalHelperAgent.ApprovalHelperAgent",
    "platformmonitoragent": "Agents.platformMonitorAgent.PlatformMonitorAgent",
    "thermostatagent": "Agents.thermostatAgent.ThermostatAgent",
    "tsdagent": "Agents.tsdAgent.TSDAgent",
    "vipagent": "Agents.vipAgent.VIPAgent",
    "bacnetagent": "Agents.BACnetAgent.BACnetAgent",
    "lightingscheduleragent": "Applications.lightingSchedulerAgent.LightingSchedulerAgent",
    "plugloadscheduleragent": "Applications.plugloadSchedulerAgent.PlugloadSchedulerAgent",
    "illuminancebasedlightingcontrol": "Applications.illuminanceBasedLightingControl.IlluminanceBasedLightingControl",
    "thermostatcontrolagent": "Applications.thermostatControlAgent.ThermostatControlAgent",
    "faultdetection": "Applications.faultDetection.FaultDetectionAgent",
    "demandresponse": "Applications.demandResponse.DemandResponse",
    "gatewayagent": "Agents.GatewayAgent.WebSocketAgent",
    "gatewaydeviceagent": "Agents.gatewayDeviceAgent.GatewayDeviceAgent",
    "metadataagent": "Agents.metadataAgent.MetadataAgent",
    "scheduleragent": "Agents.schedulerAgent.SchedulerAgent"
}

process_count = 1000
# these agents are run as threads instead of in separate processes
useThreadsInstead = ['basicagent', 'thermostatagent']


def getAgent(agent_name):
    """Resolve an agent type name to its class via the dotted path in agentDict."""
    agent_full_path = agentDict.get(agent_name.lower(), None)
    if agent_full_path is None:
        # bad agent name
        return None
    agent_module_name = ".".join(agent_full_path.split(".")[:-1])
    agent_name = agent_full_path.split(".")[-1]
    agent_module = importlib.import_module(agent_module_name)
    agent = getattr(agent_module, agent_name)
    return agent
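
# Console command grammar accepted by processCommand() below (derived from the
# branches it implements):
#   error <agent>                     -> last error recorded for an agent
#   status                            -> table of agent processes and states
#   tstatus <agent>                   -> per-thread status, fetched over a pipe
#   start|start_agent <name|pid>      -> restart a known (stopped) agent
#   stop|stop_agent <name|pid>        -> request an agent process to stop
#   start|launch_agent <type> <name>  -> launch a new agent of the given type
#   remove|remove_agent <name|pid>    -> drop a stopped agent from the registry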
def processCommand(command):
    global process_count

    def find_matches(pid):
        matches = []
        l = len(pid)
        for process_name, process in processes_dict.items():
            if str(process.id)[-l:] == str(pid):
                matches.append(process_name)
        return matches

    commands = command.split(' ')
    reply = ''
    if len(commands) == 2 and commands[0].lower() == "error":
        agent_name = commands[1]  # assume the argument is agent_name
        try:
            process = processes_dict.get(agent_name)
            error = process.err
        except AttributeError:
            return ""
        return error

    elif commands[0].lower() == 'status':
        running_count = 0
        stopped_count = 0
        stop_requested_count = 0
        for process_name, process in processes_dict.items():
            if process.is_alive():
                if process.config['stopFlag'].value == 0:
                    status, code = "running", ""
                    running_count += 1
                else:
                    status, code = "stop_requested", ""
                    stop_requested_count += 1
            else:
                status, code = "stopped", process.exitcode
                stopped_count += 1
            if hasattr(process, 'pid'):
                pid = process.pid
            else:
                pid = os.getpid()
            reply += "{:<6} {:<10} {:<30} {:<6} {:<10}\n".format(process.id, pid, process.name, status, code)
        reply += "Total %s Agents. Running: %s. Stopped: %s. Stop_Requested: %s" % (
            len(processes_dict.keys()), running_count, stopped_count, stop_requested_count)
        return reply

    # elif commands[0].lower() == 'tstatus':  # thread_status
    #     threads = threading.enumerate()
    #
    #     replies = []
    #     for thread in threads:
    #         if hasattr(thread, 'id'):
    #             replies.append((thread.id, thread.name))
    #         else:
    #             if hasattr(thread, 'parent') and hasattr(thread.parent, 'id'):
    #                 replies.append((thread.parent.id, thread.name))
    #             else:
    #                 replies.append((-1, thread.name))
    #
    #     replies = sorted(replies, key=lambda x: x[0])
    #     if len(commands) > 1:
    #         id = commands[1]  # filter only this
    #         new_reply = []
    #         for rep in replies:
    #             if str(rep[0]) == str(id):
    #                 new_reply.append(rep)
    #         replies = new_reply
    #     for id, name in replies:
    #         reply += ("{:<6} {:<50}\n".format(id, name))
    #
    #     return reply

    elif len(commands) == 2 and commands[0].lower() == "tstatus":
        target = commands[1]
        leftPipe, rightPipe = multiprocessing.Pipe()
        reduced_Pipe = reduce_connection(rightPipe)
        finalmessage = {"reduced_return_pipe": reduced_Pipe}
        outQueue.put(("commandhandler", [target], 'tstatus', finalmessage))
        if leftPipe.poll(20):
            result = leftPipe.recv()
            reply = ""
            count = 0
            alive_count = 0
            for id, name, status, watchdogtimer, msg in result:
                reply += "{:<6} {:<60} {:<6} {:<5} {:<10}\n".format(id, name, status, watchdogtimer, msg)
                count += 1
                if status:
                    alive_count += 1
            reply += "Total %s threads. Running: %s. Stopped: %s." % (count, alive_count, count - alive_count)
            return reply
        else:
            return "Timed out"

    elif len(commands) == 2 and commands[0].lower() in ["start", "start_agent", "stop", "stop_agent"]:
        agent_name = commands[1]  # assume the argument is agent_name
        if not agent_name in processes_dict:  # if agent_name is not present
            # assume it to be pid from the end
            matches = find_matches(pid=agent_name)
            if len(matches) == 1:
                agent_name = matches[0]
            if len(matches) == 0:
                return "invalid pid"
            elif len(matches) > 1:
                reply = "\n".join(
                    [str(processes_dict[process_name].id) + " " + process_name for process_name in matches])
                return reply

        process_name = agent_name
        process = processes_dict[process_name]
        if commands[0].lower() in ["start", "start_agent"]:
            old_agent = process.agent
            old_config = process.config
            old_config['stopFlag'].value = 0
            old_config['id'] = process_count
            p = BProcess(target=process.agent, name=process.name, kwargs=old_config)
            p.id = process_count
            process_count += 1
            p.agent = old_agent
            p.config = old_config
            p.start()
            processes_dict[process_name] = p
        else:
            process.config['stopFlag'].value = 1
            archived_process_list.append((datetime.datetime.utcnow(), process))
        return reply

    elif len(commands) == 3 and commands[0].lower() in ["launch_agent", "start"]:
        agent_type = commands[1]
        agent_name = commands[2]
        agent = getAgent(agent_type)
        if agent:
            print "Launching " + agent.__name__ + " with name: " + agent_name
            inQ = Queue(AGENT_QUEUE_SIZE)
            inQueues_dict[agent_name] = inQ
            stopFlag = Value('i', 0)
            config = {'name': agent_name, 'inQ': inQ, 'outQ': outQueue, 'logQ': logQueue, 'stopFlag': stopFlag, 'id': process_count}
            p = BProcess(target=agent, name=agent_name, kwargs=config)
            p.id = process_count
            process_count += 1
            p.agent = agent
            p.config = config
            p.daemon = True
            p.start()
            if agent_name in processes_dict:  # if there is an existing process by that name, terminate it
                processes_dict[agent_name].config['stopFlag'].value = 1  # make the old process stop
                archived_process_list.append((datetime.datetime.utcnow(), processes_dict[agent_name]))  # TODO kill processes in archived process if still running and clear it periodically
            processes_dict[agent_name] = p
            return ""
        else:
            return agent_type + " Agent not found"

    elif commands[0].lower() in ["remove_agent", "remove"]:
        if len(commands) == 2:
            agent_name = commands[1]  # assume the argument is agent_name
        else:
            return "Invalid number of arguments"
        if not agent_name in processes_dict:  # if agent_name is not present
            # assume it to be pid from the end
            matches = find_matches(pid=agent_name)
            if len(matches) == 1:
                agent_name = matches[0]
            elif len(matches) == 0:
                return "invalid pid"
            else:
                return " ".join([processes_dict[process_name].id for process_name in matches])

        if agent_name in processes_dict or find_matches(pid=agent_name):
            if processes_dict[agent_name].is_alive():
                if processes_dict[agent_name].config['stopFlag'].value == 1:
                    return "Wait for it to stop first"
                else:
                    return "Should stop first"
            processes_dict.pop(agent_name)  # remove it from process-dict
            inQueues_dict.pop(agent_name)
            return ""
    else:
        return "Invalid command"
| [
"[email protected]"
] | |
8fb745233b67a3ca9ae885b064cd5d202c60325f | 1108586cf9e962a5be71536fc58a3837196c31e1 | /core/urls.py | b354185636a66849b918552b8803147861040ddda0 | [] | no_license | DeepakDarkiee/drf_search_viewset | 4abad47aad7c26bb729081e23b930b33483f24cf | ca014fe39a148e2e7f68cbb9b94cf71e93f971f7 | refs/heads/master | 2023-05-05T02:30:41.203749 | 2021-05-21T11:08:03 | 2021-05-21T11:08:03 | 369,508,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py |
from django.conf.urls import url
from rest_framework import routers

from core.views import StudentViewSet, UniversityViewSet

router = routers.DefaultRouter()
router.register('students', StudentViewSet)
router.register('universities', UniversityViewSet)
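# The DefaultRouter generates list/detail routes for each registered viewset.
# In a typical project these URLs are then mounted from the root URLConf,
# e.g. (illustrative; the surrounding project layout is assumed):
#   urlpatterns = [path('api/', include('core.urls'))]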
urlpatterns = router.urls
| [
"[email protected]"
] | |
0ea9cf7e3399ef2186f2a21d7e5bbad6f4a1d92c | 9a9e0398f26cee9864d48c4618c0a482e5475e83 | /src/state/ui/ui.py | 13866ad295bc26375cc3cc15a81946b087db69f6 | [] | no_license | thydungeonsean/Rainbowmancer | 551e857a85b76489b1cee3feb0e40f7832919712 | b8395fd2c25b83c84239a1aa198a0d134d7c60f07 | refs/heads/master | 2021-01-25T06:57:05.968319 | 2017-07-17T12:00:07 | 2017-07-17T12:00:07 | 93,628,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py |
from crystal_panel import CrystalPanel
from character_panel import CharacterPanel
from ability_panel import AbilityPanel


class UI(object):

    def __init__(self, game):
        self.game = game
        self.ui_objects = []
        self.panels = {}
        self.initialize()

    def initialize(self):
        crystal = CrystalPanel(self)
        self.add_ui_object(crystal)
        self.panels['crystal'] = crystal
        character = CharacterPanel(self)
        self.add_ui_object(character)
        self.panels['character'] = character
        ability = AbilityPanel(self)
        self.add_ui_object(ability)
        self.panels['ability'] = ability

    def add_ui_object(self, obj):
        self.ui_objects.append(obj)

    def remove_ui_object(self, obj):
        self.ui_objects.remove(obj)

    def draw(self, surface, tick):
        for obj in self.ui_objects:
            obj.draw(surface, tick)

    def run(self):
        pass

    def click(self, point):
        for panel in self.ui_objects:
            clicked = panel.click(point)
            if clicked:
                return True
        return False

    def right_click(self, point):
        for panel in self.ui_objects:
            right_clicked = panel.right_click(point)
            if right_clicked:
                return True
        return False
| [
"[email protected]"
] | |
14051aef8bd1c1cf2f5c709908ed00a5830eb788 | 153c7c69c0e249dfd7b8bc9cfe18724b9b45ebf2 | /PyOptim/core/datainterface.py | cda80072be9ed1b91aca352de023e7ff2650c594 | [
"BSD-3-Clause"
] | permissive | bitfort/py-optim | e729ad8c80bffb3a843ccdded4a5051c516dd9f5 | 22e774008cd3c83c9e69de546b8733bd21729554 | refs/heads/master | 2021-01-18T10:11:23.853286 | 2013-04-13T01:33:50 | 2013-04-13T01:33:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,612 | py |
from random import shuffle
from scipy import reshape, array
from numpy.matlib import repmat
from pybrain.utilities import setAllArgs


class SampleProvider(object):
    """ Unified interface for interacting with a model:
    given a data sample and a parameter vector, it produces
    gradients, loss values, and potentially other terms like
    diagonal Hessians.

    The samples are iteratively generated, either from a dataset, or from a
    function, individually or in minibatches, shuffled or not.
    """

    batch_size = 1

    # optional function that generates diagonal Hessian approximations
    diaghess_fun = None

    def __init__(self, paramdim, loss_fun, gradient_fun, **kwargs):
        self.paramdim = paramdim
        self.loss_fun = loss_fun
        self.gradient_fun = gradient_fun
        setAllArgs(self, kwargs)

    def nextSamples(self, how_many):
        """ Obtain a certain number of samples. """
        self.batch_size = how_many
        self._provide()

    def _provide(self):
        """ abstract """

    def reset(self):
        """ abstract """

    def currentLosses(self, params):
        return self.loss_fun(params)

    def currentGradients(self, params):
        return self.gradient_fun(params)

    def currentDiagHess(self, params):
        if self.diaghess_fun is None:
            return
        return self.diaghess_fun(params)


class FunctionWrapper(SampleProvider):
    """ Specialized case for a function that can generate samples on the fly. """

    record_samples = False

    def __init__(self, dim, stochfun, **kwargs):
        self.stochfun = stochfun
        self._seen = []
        SampleProvider.__init__(self, dim, loss_fun=stochfun._f,
                                gradient_fun=stochfun._df,
                                diaghess_fun=stochfun._ddf, **kwargs)
        stochfun._retain_sample = True

    def _provide(self):
        self.stochfun._newSample(self.paramdim * self.batch_size, override=True)
        if self.record_samples:
            ls = self.stochfun._lastseen
            if self.batch_size == 1:
                self._seen.append(ls)
            else:
                for l in reshape(ls, (self.batch_size, self.paramdim)):
                    self._seen.append(reshape(l, (1, self.paramdim)))

    def currentLosses(self, params):
        if self.batch_size > 1:
            params = repmat(params, 1, self.batch_size)
            res = self.loss_fun(params)
            return reshape(res, (self.batch_size, self.paramdim))
        else:
            return self.loss_fun(params)

    def currentGradients(self, params):
        if self.batch_size > 1:
            params = repmat(params, 1, self.batch_size)
            res = self.gradient_fun(params)
            return reshape(res, (self.batch_size, self.paramdim))
        else:
            return self.gradient_fun(params)

    def currentDiagHess(self, params):
        if self.diaghess_fun is None:
            return
        if self.batch_size > 1:
            params = repmat(params, 1, self.batch_size)
            res = self.diaghess_fun(params)
            return reshape(res, (self.batch_size, self.paramdim))
        else:
            return self.diaghess_fun(params)

    def __str__(self):
        return self.stochfun.__class__.__name__ + " n=%s curv=%s " % (self.stochfun.noiseLevel, self.stochfun.curvature)
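
# Illustrative usage sketch (not part of the original module): a provider is
# typically driven from an optimiser loop, along the lines of the following,
# where `stochfun`, `dim` and the step size are hypothetical placeholders.
#
#   provider = FunctionWrapper(dim, stochfun)
#   theta = zeros(dim)
#   for _ in range(1000):
#       provider.nextSamples(1)   # draw a fresh sample
#       theta = theta - 0.01 * provider.currentGradients(theta).flatten()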
class DatasetWrapper(SampleProvider):
    """ Specialized case for datasets """

    shuffling = True

    def reset(self, dataset=None):
        if dataset is not None:
            self.dataset = dataset
        assert len(self.dataset) > 0, 'Must be non-empty'
        self._indices = range(len(self.dataset))
        self._counter = 0

    def getIndex(self):
        tmp = self._counter % len(self.dataset)
        if tmp + self.batch_size > len(self.dataset):
            # dataset size is not a multiple of the batch size
            tmp = 0
        if tmp == 0 and self.shuffling:
            shuffle(self._indices)
        # if len(self.dataset) < self.batch_size:
        #     print 'WARNING: Dataset smaller than batchsize'
        return self._indices[tmp]


class ModuleWrapper(DatasetWrapper):
    """ A wrapper around a PyBrain module that defines a forward-backward,
    and a corresponding dataset.

    Assumption: MSE of targets is the criterion used. """

    def __init__(self, dataset, module, **kwargs):
        setAllArgs(self, kwargs)
        self.module = module
        self.paramdim = module.paramdim
        self._ready = False
        self.reset(dataset)

    def _provide(self):
        start = self.getIndex()
        # reuse samples multiple times if the dataset is too small
        self._currentSamples = [self.dataset.getSample(si % len(self.dataset)) for si in range(start, start + self.batch_size)]
        self._counter += self.batch_size
        self._ready = False

    def loss_fun(self, params):
        self._forwardBackward(params)
        return self._last_loss

    def gradient_fun(self, params):
        self._ready = False
        self._forwardBackward(params)
        return self._last_grads

    def _forwardBackward(self, params):
        if self._ready:
            return
        losses = []
        grads = []
        for inp, targ in self._currentSamples:
            self.module._setParameters(params)
            self.module.resetDerivatives()
            self.module.reset()
            outp = self.module.activate(inp)
            losses.append(0.5 * sum((outp - targ) ** 2))
            self.module.backActivate(outp - targ)
            grads.append(self.module.derivs.copy())
        self._last_loss = array(losses)
        self._last_grads = reshape(array(grads), (self.batch_size, self.paramdim))
        self._ready = True


class DataFunctionWrapper(DatasetWrapper, FunctionWrapper):
    """ Data from a stochastic function. """

    def __init__(self, dataset, stochfun, **kwargs):
        dim = dataset[0].size
        FunctionWrapper.__init__(self, dim, stochfun, **kwargs)
        self.reset(dataset)

    def _provide(self):
        i = self.getIndex()
        if self.batch_size == 1:
            x = self.dataset[i]
        else:
            x = array(self.dataset[i:i + self.batch_size])
        self.stochfun._lastseen = reshape(x, (1, self.batch_size * self.paramdim))
        self._counter += self.batch_size
| [
"[email protected]"
] | |
41d548ecd1ca4ca0d1bdabc65eab2e8717e5a4b8 | 02508aa773dcbd9939eb879952ee2cb3dd90bcad | /test/onnx/test_utility_funs.py | 71f52b306b8c4699ed7fa5d53e208defcb16a0d4 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | dhivyadharshin/pytorch | d8a3b7f3c03e21e776ea34788d13743467b738c8 | 6a170011876bb8bd1909e8f60fba1270ac7a5577 | refs/heads/master | 2023-07-18T07:31:52.918955 | 2021-08-17T18:12:01 | 2021-08-17T18:12:01 | 397,330,616 | 5 | 0 | NOASSERTION | 2021-08-17T18:12:02 | 2021-08-17T16:57:16 | null | UTF-8 | Python | false | false | 39,565 | py |
from test_pytorch_common import TestCase, run_tests
import torch
import torch.onnx
from torch.onnx import utils, OperatorExportTypes, TrainingMode
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type, _set_onnx_shape_inference
import torch.utils.cpp_extension
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion
import caffe2.python.onnx.backend as backend
from verify import verify
import torchvision
import onnx
import io
import copy
import unittest
skip = unittest.skip
class TestUtilityFuns(TestCase):
opset_version = 9
def setUp(self):
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
def _model_to_graph(self, model, input,
do_constant_folding=True,
example_outputs=None,
training=TrainingMode.EVAL,
operator_export_type=OperatorExportTypes.ONNX,
input_names=None,
dynamic_axes=None):
# Need disable onnx_shape_inference for this test because it puts const node to initializers.
_set_onnx_shape_inference(False)
utils._validate_dynamic_axes(dynamic_axes, model, None, None)
graph, params_dict, torch_out = utils._model_to_graph(model, input,
do_constant_folding=do_constant_folding,
_disable_torch_constant_prop=True,
operator_export_type=operator_export_type,
training=training,
example_outputs=example_outputs,
input_names=input_names,
dynamic_axes=dynamic_axes)
_set_onnx_shape_inference(True)
return graph, params_dict, torch_out
def test_is_in_onnx_export(self):
test_self = self
class MyModule(torch.nn.Module):
def forward(self, x):
test_self.assertTrue(torch.onnx.is_in_onnx_export())
raise ValueError
return x + 1
x = torch.randn(3, 4)
f = io.BytesIO()
try:
torch.onnx.export(MyModule(), x, f, opset_version=self.opset_version)
except ValueError:
self.assertFalse(torch.onnx.is_in_onnx_export())
def test_validate_dynamic_axes_invalid_input_output_name(self):
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
utils._validate_dynamic_axes({"input1": {}, "output": {},
"invalid_name1": {}, "invalid_name2": {}},
None, ["input1", "input2"], ["output"])
messages = [str(warning.message) for warning in w]
assert "Provided key invalid_name1 for dynamic axes is not a valid input/output name" in messages
assert "Provided key invalid_name2 for dynamic axes is not a valid input/output name" in messages
assert len(messages) == 2
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_to_slice(self):
class SplitModule(torch.nn.Module):
def forward(self, x, y, t):
splits = (x.size(1), y.size(1))
out, out2 = torch.split(t, splits, dim=1)
return out, out2
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.randn(2, 3)
y = torch.randn(2, 4)
t = torch.randn(2, 7)
graph, _, _ = self._model_to_graph(SplitModule(), (x, y, t), input_names=['x', 'y', 't'],
dynamic_axes={'x': [0, 1], 'y': [0, 1], 't': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::SplitToSequence"
def test_output_list(self):
class PaddingLayer(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input_t):
# type: (Tensor) -> Tensor
for i in range(2):
input_t = input_t * 2
return input_t
input_t = torch.ones(size=[10], dtype=torch.long)
model = torch.jit.script(PaddingLayer())
example_output = model(input_t)
with self.assertRaises(RuntimeError):
torch.onnx.export(model,
(input_t, ),
"test.onnx",
opset_version=self.opset_version,
example_outputs=[example_output])
def test_constant_fold_transpose(self):
class TransposeModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.transpose(a, 1, 0)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(3, 2)
graph, _, __ = self._model_to_graph(TransposeModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Transpose"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
def test_constant_fold_reduceL2(self):
class ReduceModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.norm(a, p=2, dim=-2, keepdim=False)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(ReduceModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::ReduceL2"
assert len(list(graph.nodes())) == 1
def test_constant_fold_reduceL1(self):
class NormModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.norm(a, p=1, dim=-2)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(NormModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::ReduceL1"
assert len(list(graph.nodes())) == 1
def test_constant_fold_slice(self):
class NarrowModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.narrow(a, 0, 0, 1)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
graph, _, __ = self._model_to_graph(NarrowModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Slice"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
def test_constant_fold_slice_index_exceeds_dim(self):
class SliceIndexExceedsDimModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = a[1:10] # index exceeds dimension
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
graph, _, __ = self._model_to_graph(SliceIndexExceedsDimModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Slice"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
def test_constant_fold_slice_negative_index(self):
class SliceNegativeIndexModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = a[0:-1] # index relative to the end
c = torch.select(a, dim=-1, index=-2)
d = torch.select(a, dim=1, index=0)
return b + x, c + d
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
graph, _, __ = self._model_to_graph(SliceNegativeIndexModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Slice"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
def test_constant_fold_gather(self):
class GatherModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.select(a, dim=1, index=-2)
c = torch.index_select(a, dim=-2, index=torch.tensor([0, 1]))
return b + 1, c + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
model = GatherModule()
model(x)
graph, _, __ = self._model_to_graph(GatherModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Gather"
def test_constant_fold_unsqueeze(self):
class UnsqueezeModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.unsqueeze(a, -2)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 2, 3)
graph, _, __ = self._model_to_graph(UnsqueezeModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1, 2]})
for node in graph.nodes():
assert node.kind() != "onnx::Unsqueeze"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
def test_constant_fold_unsqueeze_multi_axies(self):
class PReluModel(torch.nn.Module):
def __init__(self):
super(PReluModel, self).__init__()
self.prelu = torch.nn.PReLU()
def forward(self, x):
a = torch.randn(2, 3, 4, 5, 8, 7)
return self.prelu(x) + a
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.randn(2, 3, 4, 5, 8, 7)
graph, _, __ = self._model_to_graph(PReluModel(), x, input_names=['x'],
dynamic_axes={'x': [0, 1, 2, 3, 4, 5]})
for node in graph.nodes():
assert node.kind() != "onnx::Unsqueeze"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 4
def test_constant_fold_squeeze_without_axes(self):
class SqueezeModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[[1., 2., 3.], [4., 5., 6.]]])
return torch.squeeze(a) + x + torch.squeeze(a)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(SqueezeModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Squeeze"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 2
def test_constant_fold_squeeze_with_axes(self):
class SqueezeAxesModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[[1., 2., 3.], [4., 5., 6.]]])
return torch.squeeze(a, dim=-3) + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(SqueezeAxesModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Squeeze"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
def test_constant_fold_concat(self):
class ConcatModule(torch.nn.Module):
def forward(self, x):
# Why did I insert a Cast here? There appears to be intentional
# behavior in ONNX constant folding where constant tensors which
# are not attached to any known to be foldable onnx
# operations don't get extracted into the initializer graph. So
# without these casts, we will actually fail to pull out one of
# the constants, thus failing constant folding. I think the
# test is wrong but I don't have time to write a more correct
# test (I think the right way to go about the test is to set up
# a predicate for what invariant graphs should hold after
# constant folding, and then verify this predicate holds.
# I think the asserts below are an attempt at this predicate,
# but it is not right!)
#
# More commentary at
# https://github.com/pytorch/pytorch/pull/18698/files#r340107552
a = torch.tensor([[1., 2., 3.]]).to(torch.float)
b = torch.tensor([[4., 5., 6.]]).to(torch.float)
c = torch.cat((a, b), 0)
d = b + c
return x + d
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = self._model_to_graph(ConcatModule(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Concat"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
def test_constant_fold_lstm(self):
class GruNet(torch.nn.Module):
def __init__(self):
super(GruNet, self).__init__()
self.mygru = torch.nn.GRU(7, 3, 1, bidirectional=False)
def forward(self, input, initial_state):
return self.mygru(input, initial_state)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
input = torch.randn(5, 3, 7)
h0 = torch.randn(1, 3, 3)
graph, _, __ = self._model_to_graph(GruNet(), (input, h0), input_names=['input', 'h0'],
dynamic_axes={'input': [0, 1, 2], 'h0': [0, 1, 2]})
for node in graph.nodes():
assert node.kind() != "onnx::Slice"
assert node.kind() != "onnx::Concat"
assert node.kind() != "onnx::Unsqueeze"
if self.opset_version <= 12:
assert len(list(graph.nodes())) == 3
else:
# Unsqueeze takes its "axes" parameter as an input instead of an attribute when opset version >= 13
assert len(list(graph.nodes())) == 4
def test_constant_fold_transpose_matmul(self):
class MatMulNet(torch.nn.Module):
def __init__(self):
super(MatMulNet, self).__init__()
self.B = torch.nn.Parameter(torch.ones(5, 3))
def forward(self, A):
return torch.matmul(A, torch.transpose(self.B, -1, -2))
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
A = torch.randn(2, 3)
graph, _, __ = self._model_to_graph(MatMulNet(), (A, ),
input_names=['A'], dynamic_axes={'A': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Transpose"
assert len(list(graph.nodes())) == 1
def test_constant_fold_reshape(self):
class ReshapeModule(torch.nn.Module):
def __init__(self, ):
super(ReshapeModule, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
b = self.weight.reshape(1, -1, 1, 1)
return x * b
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.randn(4, 5)
graph, _, __ = self._model_to_graph(ReshapeModule(), (x, ),
input_names=['x'], dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Reshape"
assert len(list(graph.nodes())) == 1
def test_constant_fold_div(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
div = self.weight.div(torch.tensor([1, 2, 3, 4, 5]))
return div * x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(Module(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Div"
assert len(list(graph.nodes())) == 1
def test_constant_fold_mul(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
mul = self.weight.mul(torch.tensor([1, 2, 3, 4, 5]))
return mul / x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(Module(), (x, ), input_names=['x'],
dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Mul"
assert len(list(graph.nodes())) == 1
def test_constant_fold_add(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
add = self.weight + torch.tensor([1, 2, 3, 4, 5])
return add - x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, params_dict, __ = self._model_to_graph(
Module(), (x, ), do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX,
input_names=['x'], dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
self.assertTrue(node.kind() != "onnx::Add")
self.assertEqual(len(list(graph.nodes())), 1)
params = list(params_dict.values())
self.assertEqual(len(params), 1)
weight = params[0]
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(weight, torch.tensor([2, 3, 4, 5, 6]))
def test_constant_fold_sub(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
sub = self.weight - torch.tensor([1, 2, 3, 4, 5])
return sub + x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, params_dict, __ = self._model_to_graph(
Module(), (x, ), do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX, input_names=['x'], dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Sub"
self.assertEqual(len(list(graph.nodes())), 1)
params = list(params_dict.values())
self.assertEqual(len(params), 1)
weight = params[0]
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(weight, torch.tensor([0, -1, -2, -3, -4]))
def test_constant_fold_sqrt(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
sqrt = torch.sqrt(self.weight)
return sqrt / x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(Module(), (x, ), input_names=['x'], dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Sqrt"
assert len(list(graph.nodes())) == 1
def test_constant_fold_shape(self):
class ShapeModule(torch.nn.Module):
def __init__(self):
super(ShapeModule, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
shape = self.weight.shape[0]
return x + shape
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(ShapeModule(), (x, ), input_names=['x'], dynamic_axes={'x': [0, 1]})
for node in graph.nodes():
assert node.kind() != "onnx::Shape"
assert len(list(graph.nodes())) == 1
def test_strip_doc_string(self):
class MyModule(torch.nn.Module):
def forward(self, input):
return torch.exp(input)
x = torch.randn(3, 4)
def is_model_stripped(f, strip_doc_string=None):
if strip_doc_string is None:
torch.onnx.export(MyModule(), x, f, opset_version=self.opset_version)
else:
torch.onnx.export(MyModule(), x, f, strip_doc_string=strip_doc_string,
opset_version=self.opset_version)
model = onnx.load(io.BytesIO(f.getvalue()))
model_strip = copy.copy(model)
onnx.helper.strip_doc_string(model_strip)
return model == model_strip
# test strip_doc_string=True (default)
self.assertTrue(is_model_stripped(io.BytesIO()))
# test strip_doc_string=False
self.assertFalse(is_model_stripped(io.BytesIO(), False))
# NB: remove this test once DataParallel can be correctly handled
def test_error_on_data_parallel(self):
model = torch.nn.DataParallel(torch.nn.ReflectionPad2d((1, 2, 3, 4)))
x = torch.randn(1, 2, 3, 4)
f = io.BytesIO()
with self.assertRaisesRegex(ValueError,
"torch.nn.DataParallel is not supported by ONNX "
"exporter, please use 'attribute' module to "
"unwrap model from torch.nn.DataParallel. Try "):
torch.onnx.export(model, x, f, opset_version=self.opset_version)
def test_export_mode(self):
class MyModule(torch.nn.Module):
def forward(self, x):
y = x + 1
return y
model = MyModule()
x = torch.randn(10, 3, 128, 128)
f = io.BytesIO()
# set model to inference mode and export in training mode
model.eval()
old_state = model.training
torch.onnx.export(model, (x,), f,
opset_version=self.opset_version, training=torch.onnx.TrainingMode.TRAINING)
# verify that the model state is preserved
assert model.training == old_state
# set mode to training mode and export in inference mode
model.train()
old_state = model.training
torch.onnx.export(model, (x,), f,
opset_version=self.opset_version, training=torch.onnx.TrainingMode.EVAL)
# verify that the model state is preserved
assert model.training == old_state
def test_diagnose_export_mode(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.cumsum(x, dim=0)
model = MyModule()
x = torch.randn(2, 3, 4)
f = io.BytesIO()
# run export in diagnose mode
graph, unsupported_ops = utils._find_missing_ops_onnx_export(model, (x,), f,
opset_version=9)
iter = graph.nodes()
assert next(iter).kind() == "onnx::Constant"
assert next(iter).kind() == "prim::Constant"
assert next(iter).kind() == "aten::cumsum"
assert len(unsupported_ops) == 1
assert unsupported_ops == ["aten::cumsum"]
def test_aten_fallthrough(self):
# Test aten export of op with no symbolic
class Module(torch.nn.Module):
def forward(self, x):
return torch.triu(x)
x = torch.randn(2, 3, 4)
_set_opset_version(self.opset_version)
graph, _, __ = self._model_to_graph(Module(), (x, ),
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=['x'], dynamic_axes={'x': [0, 1, 2]})
iter = graph.nodes()
assert next(iter).kind() == "onnx::Constant"
assert next(iter).kind() == "aten::triu"
def test_custom_op_fallthrough(self):
# Test custom op
op_source = """
#include <torch/script.h>
torch::Tensor custom_add(torch::Tensor self, torch::Tensor other) {
return self + other;
}
static auto registry =
torch::RegisterOperators("custom_namespace::custom_op", &custom_add);
"""
torch.utils.cpp_extension.load_inline(
name="custom_add",
cpp_sources=op_source,
is_python_module=False,
verbose=True,
)
class FooModel(torch.nn.Module):
def forward(self, input, other):
# Calling custom op
return torch.ops.custom_namespace.custom_op(input, other)
x = torch.randn(2, 3, 4, requires_grad=False)
y = torch.randn(2, 3, 4, requires_grad=False)
model = FooModel()
graph, _, __ = self._model_to_graph(model, (x, y),
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=['x', 'y'],
dynamic_axes={'x': [0, 1, 2], 'y': [0, 1, 2]})
iter = graph.nodes()
assert next(iter).kind() == "custom_namespace::custom_op"
def test_onnx_fallthrough(self):
# Test aten export of op with symbolic for aten
x = torch.randn(100, 128)
y = torch.randn(100, 128)
model = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
graph, _, __ = self._model_to_graph(model, (x, y),
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=['x', 'y'],
dynamic_axes={'x': [0, 1], 'y': [0, 1]})
iter = graph.nodes()
assert next(iter).kind() == "onnx::Constant"
assert next(iter).kind() == "onnx::Constant"
assert next(iter).kind() == "aten::cosine_similarity"
def test_quantized_fallthrough(self):
# Test Quantized op
class QModule(torch.nn.Module):
def __init__(self):
super(QModule, self).__init__()
self.quant1 = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
res = self.quant1(x)
return self.dequant(res)
model = QModule()
torch.backends.quantized.engine = "qnnpack"
pt_inputs = (torch.randn(1, 2, 3, 4))
model.qconfig = torch.quantization.default_qconfig
q_model = torch.quantization.prepare(model, inplace=False)
q_model = torch.quantization.convert(q_model, inplace=False)
q_model.eval()
output = q_model(*pt_inputs)
graph, _, __ = self._model_to_graph(q_model, pt_inputs, example_outputs=output,
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=['pt_inputs'],
dynamic_axes={'pt_inputs': [0, 1, 2, 3]})
iter = graph.nodes()
assert next(iter).kind() == "onnx::Constant"
assert next(iter).kind() == "onnx::Constant"
assert next(iter).kind() == "onnx::Constant"
assert next(iter).kind() == "aten::quantize_per_tensor"
assert next(iter).kind() == "aten::dequantize"
# prim::ListConstruct is exported as onnx::SequenceConstruct for opset >= 11
@skipIfUnsupportedOpsetVersion([11, 12, 13])
def test_prim_fallthrough(self):
# Test prim op
class PrimModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
if isinstance(x, list):
y = x
else:
y = [x]
return y
x = torch.tensor([2])
model = PrimModule()
output = model(x)
model.eval()
graph, _, __ = self._model_to_graph(model, (x,), example_outputs=output,
operator_export_type=OperatorExportTypes.ONNX_FALLTHROUGH,
input_names=['x'], dynamic_axes={'x': [0]})
iter = graph.nodes()
assert next(iter).kind() == "prim::ListConstruct"
def test_custom_layer_tuple(self):
class CustomFunction(torch.autograd.Function):
@staticmethod
def symbolic(g, input):
return g.op("CustomNamespace::Custom", input, outputs=2)
@staticmethod
def forward(ctx, input):
return input, input
class Custom(torch.nn.Module):
def forward(self, input):
return CustomFunction.apply(input)
model = Custom()
batch = torch.FloatTensor(1, 3)
graph, _, _ = self._model_to_graph(model, batch,
input_names=['batch'], dynamic_axes={'batch': [0, 1]})
iter = graph.nodes()
assert next(iter).kind() == "CustomNamespace::Custom"
def test_unused_initializers(self):
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv2 = torch.nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(1, 1))
self.k_proj = torch.nn.Linear(5, 5, bias=True)
def forward(self, x):
x = self.conv2(x)
return x
x = torch.randn(20, 16, 50, 100)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
_, params_dict, __ = self._model_to_graph(Model(), (x, ), do_constant_folding=False,
operator_export_type=OperatorExportTypes.ONNX,
input_names=['x'],
dynamic_axes={'x': [0, 1, 2, 3]})
assert len(params_dict) == 2
def test_scripting_param(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.conv = torch.nn.Conv2d(3, 16, kernel_size=1, stride=2, padding=3, bias=True)
self.bn = torch.nn.BatchNorm2d(16, affine=True)
def forward(self, x):
x = self.conv(x)
bn = self.bn(x)
return bn
model = torch.jit.script(MyModule())
x = torch.randn(10, 3, 128, 128)
example_outputs = model(x)
f = io.BytesIO()
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(model, (x,), do_constant_folding=True, example_outputs=example_outputs,
operator_export_type=OperatorExportTypes.ONNX,
input_names=['x'], dynamic_axes={'x': [0, 1, 2, 3]})
graph_input_params = [param.debugName() for param in graph.inputs()]
assert all(item in graph_input_params for item in dict(model.named_parameters())), \
"Graph parameter names does not match model parameters."
def test_modifying_params(self):
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.param = torch.nn.Parameter(torch.tensor([2.0]))
def forward(self, x):
y = x * x
self.param.data.add_(1.0)
return y
x = torch.tensor([1, 2])
verify(MyModel(), x, backend, do_constant_folding=False)
def test_fuse_conv_bn(self):
class Fuse(torch.nn.Module):
def __init__(self):
super(Fuse, self).__init__()
self.conv = torch.nn.Conv2d(3, 2, kernel_size=1, stride=2, padding=3, bias=True)
self.bn = torch.nn.BatchNorm2d(2)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
x = torch.randn(2, 3, 2, 2, requires_grad=True)
graph, _, __ = self._model_to_graph(Fuse(), (x, ),
training=TrainingMode.EVAL, input_names=['x'],
dynamic_axes={'x': [0, 1, 2, 3]})
for node in graph.nodes():
assert node.kind() != "onnx::BatchNormalization"
assert node.kind() == "onnx::Conv"
assert len(list(graph.nodes())) == 1
def test_fuse_resnet18(self):
model = torchvision.models.resnet18(pretrained=True)
x = torch.randn(2, 3, 224, 224, requires_grad=True)
graph, _, __ = self._model_to_graph(model, (x, ),
input_names=['x'], dynamic_axes={'x': [0, 1, 2, 3]})
for node in graph.nodes():
assert node.kind() != "onnx::BatchNormalization"
def test_onnx_function_substitution_pass(self):
@torch.jit.script
def f(x : torch.Tensor, y : torch.Tensor):
z = x - y
return x + z
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, x, y):
return f(x, y)
model = MyModule()
input_1 = torch.tensor(11)
input_2 = torch.tensor(12)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = self._model_to_graph(MyModule(), (input_1, input_2), do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX,
input_names=['input_1', 'input_2'],
dynamic_axes={'input_1': [0], 'input_2': [0]})
# Check that the prim::Constant node in the graph for representing the
# scripted function `f` is removed and the following prim::CallFunction
# is replaced by an inline graph, with onnx::Sub and onnx::Add nodes.
for node in graph.nodes():
assert node.kind() != "prim::Constant"
assert len(list(graph.nodes())) == 2 # onnx::Sub and onnx::Add nodes only.
# opset 10 tests
TestUtilityFuns_opset10 = type(str("TestUtilityFuns_opset10"),
(TestCase,),
dict(TestUtilityFuns.__dict__, opset_version=10))
# opset 11 tests
TestUtilityFuns_opset11 = type(str("TestUtilityFuns_opset11"),
(TestCase,),
dict(TestUtilityFuns.__dict__, opset_version=11))
# opset 12 tests
TestUtilityFuns_opset12 = type(str("TestUtilityFuns_opset12"),
(TestCase,),
dict(TestUtilityFuns.__dict__, opset_version=12))
# opset 13 tests
TestUtilityFuns_opset13 = type(str("TestUtilityFuns_opset13"),
(TestCase,),
dict(TestUtilityFuns.__dict__, opset_version=13))
# opset 11 tests
TestUtilityFuns_opset11_new_jit_API = type(str("TestUtilityFuns_opset11_new_jit_API"),
(TestCase,),
dict(TestUtilityFuns.__dict__, opset_version=11))
# opset 12 tests
TestUtilityFuns_opset12_new_jit_API = type(str("TestUtilityFuns_opset12_new_jit_API"),
(TestCase,),
dict(TestUtilityFuns.__dict__, opset_version=12))
# opset 13 tests
TestUtilityFuns_opset13_new_jit_API = type(str("TestUtilityFuns_opset13_new_jit_API"),
(TestCase,),
dict(TestUtilityFuns.__dict__, opset_version=13))
if __name__ == "__main__":
run_tests()
| [
"[email protected]"
] | |
2bc24f18339ff57dfd8fe78df5b0674bbcea8621 | 1f82b95d45c6eed81a4361c7ed4cdc04789249d3 | /studentassign/domain/Student.py | 07daad24126be7343c576199e8360f26e7d127fd | [] | no_license | andidh/Python | dc06728ba4b9e54a6e9ff52afbbe75d43b855b36 | 8b629d160be541a6955d3799ac91358cecf5986a | refs/heads/master | 2020-12-24T20:10:49.132192 | 2016-04-13T15:34:36 | 2016-04-13T15:34:36 | 56,164,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | '''
Created on Nov 16, 2015
@author: AndiD
'''
class Student:
def __init__(self, student_id, name, group):
"""
:param student_id:
:param name:
:param group:
:return:
"""
self._student_id = student_id
self._name = name
self._group = group
def get_student_id(self):
return self._student_id
def get_name(self):
return self._name
def get_group(self):
return self._group
def set_student_id(self, student_id):
self._student_id = student_id
def set_name(self, name):
self._name = name
def set_group(self, group):
self._group = group
def __repr__(self):
return "{" + str(self._student_id) + ", " + self._name + ", " +str(self._group) + "}"
| [
"[email protected]"
] | |
b8101f1890fe1cde1b9d02a092f473d3bce4a8ba | 93f0d70bea431064897698bef580e07159a2a4a5 | /backend/src/settings.py | 41035d5d9525c4ad66f364e6adc28df508adda00 | [] | no_license | azizcruz/react_rdf_app | 5ba9ffdb18c7d11187d58255ac84d5c37620823e | 11dc836798ba4becdafdd0f04e7ea0164116fc4c | refs/heads/master | 2020-05-04T18:43:26.853445 | 2019-04-07T11:52:41 | 2019-04-07T11:52:41 | 179,364,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,446 | py | """
Django settings for src project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'bootstrap_datepicker_plus',
'todo.apps.TodoConfig',
'corsheaders',
'rest_framework',
'todo_api.apps.TodoApiConfig'
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'src.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
BOOTSTRAP4 = {
'include_jquery': True,
}
# We whitelist localhost:3000 because that's where the frontend will be
# served. The trailing comma is required: without it the parentheses yield
# a plain string instead of the tuple django-cors-headers expects, and the
# origin must not carry a trailing slash.
CORS_ORIGIN_WHITELIST = (
    'localhost:3000',
) | [
"[email protected]"
] | |
11e0ab6427e70a176bc90a0504c0b2891bee1de9 | 1244d693ae8d7d68721f972de82970c321b2f06f | /examples/network/create.py | e948ccc8a51f10108ac3879164c846742f42f31e | [
"Apache-2.0"
] | permissive | jasonzhuyx/python-openstacksdk | ae3b07b0729a55a2ab2faceee23ee6a8eb20b43a | 087140278d8c2e3f457093375bc480bd0045f86f | refs/heads/master | 2020-04-08T19:39:13.634700 | 2015-09-03T19:20:07 | 2015-09-03T19:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Network examples
Create all the pieces parts to have a working network.
To run:
python examples/network/create.py
"""
import sys
from examples import common
from examples import connection
def create(conn, name, opts, ports_to_open=(80, 22)):
dns_nameservers = opts.data.pop('dns_nameservers', '206.164.176.34')
cidr = opts.data.pop('cidr', '10.3.3.0/24')
network = conn.network.find_network(name)
if network is None:
network = conn.network.create_network(name=name)
print(str(network))
subnet = conn.network.find_subnet(name)
if subnet is None:
args = {
"name": name,
"network_id": network.id,
"ip_version": "4",
"dns_nameservers": [dns_nameservers],
"cidr": cidr,
}
subnet = conn.network.create_subnet(**args)
print(str(subnet))
extnet = conn.network.find_network("Ext-Net")
router = conn.network.find_router(name)
if router is None:
args = {
"name": name,
"external_gateway_info": {"network_id": extnet.id}
}
router = conn.network.create_router(**args)
conn.network.router_add_interface(router, subnet.id)
print(str(router))
sg = conn.network.find_security_group(name)
if sg is None:
sg = conn.network.create_security_group(name=name)
for port in ports_to_open:
conn.network.security_group_open_port(sg.id, port)
conn.network.security_group_allow_ping(sg.id)
print(str(sg))
return network
def run_network(opts):
name = opts.data.pop('name', 'netty')
conn = connection.make_connection(opts)
return create(conn, name, opts)
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_network))
| [
"[email protected]"
] | |
4722d5e476f88f65763e3bd8d8ad036dc9ae67e2 | 87ad372898e793faf1ad89f4bb3b6e84a8002131 | /tests/unit/Strategy/test_set_next_time_lock.py | 5db244ebd53e53b7059dbf9c6899718d72115239 | [] | no_license | atsignhandle/unagii-vault-v2 | 6a9a96c11d34257bc3fdae57455ec3b2f9c0029a | 548f715f34329eb5abebffe40acbeb56a31cb6f3 | refs/heads/main | 2023-08-27T00:59:48.080152 | 2021-09-28T02:47:36 | 2021-09-28T02:47:36 | 413,448,825 | 0 | 0 | null | 2021-10-04T14:07:37 | 2021-10-04T14:07:36 | null | UTF-8 | Python | false | false | 410 | py | import brownie
import pytest
def test_set_next_time_lock(strategyTest, user):
timeLock = strategyTest.timeLock()
# not time lock
with brownie.reverts("!time lock"):
strategyTest.setNextTimeLock(user, {"from": user})
tx = strategyTest.setNextTimeLock(user, {"from": timeLock})
assert strategyTest.nextTimeLock() == user
assert tx.events["SetNextTimeLock"].values() == [user]
| [
"[email protected]"
] | |
578b5939179d1b07ba88a691c7e64e34ab5f3c0c | 61fb12fd550291bd59c15b244a99fd9394cbcbc2 | /wajju.py | a86073946bb0a59e7493e9d803fbd0c3d2364154 | [] | no_license | Mujju-palaan/Python | 9ab304a826964ef8d6643e326293e85631f03880 | a5b14736954e87596974690e9706bcb227b38b82 | refs/heads/master | 2020-06-12T19:55:45.327317 | 2019-08-08T05:23:11 | 2019-08-08T05:23:11 | 194,407,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import math
def sum(numbers):
return number1 + number2
print("Enter a number :")
number1 = int(raw_input())
print("Enter another number")
number2 = int(raw_input())
print("The sum is :" + str(number1) + str(number2))
| [
"[email protected]"
] | |
c0f831999f30b8f74fdaa9e12c85f7536f8dcb6f | 5168da0fb501135a3c86e4e95679f54a825d69d0 | /openquake/hazardlib/calc/disagg.py | 99d1c16a083181d80620dce869ef0b10282f0ec3 | [
"AGPL-3.0-only",
"BSD-3-Clause"
] | permissive | GFZ-Centre-for-Early-Warning/shakyground | 266b29c05ea2cfff6d9d61f21b5114282c6fa117 | 0da9ba5a575360081715e8b90c71d4b16c6687c8 | refs/heads/master | 2023-06-01T21:41:11.127323 | 2018-10-09T10:31:48 | 2018-10-09T10:31:48 | 144,732,068 | 1 | 3 | BSD-3-Clause | 2019-11-18T07:58:49 | 2018-08-14T14:32:50 | Python | UTF-8 | Python | false | false | 19,356 | py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`openquake.hazardlib.calc.disagg` contains
:func:`disaggregation` as well as several aggregation functions for
extracting a specific PMF from the result of :func:`disaggregation`.
"""
import sys
import warnings
import operator
import collections
import numpy
import scipy.stats
from openquake.baselib.python3compat import raise_
from openquake.baselib.performance import Monitor
from openquake.baselib.hdf5 import ArrayWrapper
from openquake.baselib.general import AccumDict, pack, groupby
from openquake.hazardlib.calc import filters
from openquake.hazardlib.imt import from_string
from openquake.hazardlib.geo.geodetic import npoints_between
from openquake.hazardlib.geo.utils import get_longitudinal_extent
from openquake.hazardlib.geo.utils import cross_idl
from openquake.hazardlib.site import SiteCollection
from openquake.hazardlib.gsim.base import ContextMaker
def _imls(curves, poe, imt, imls, rlzi):
if poe is None: # iml_disagg was set
return imls
# else return interpolated intensity measure levels
levels = [numpy.interp(poe, curve[rlzi][imt][::-1], imls[::-1])
if curve else numpy.nan for curve in curves]
return numpy.array(levels) # length N
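# Illustrative sketch (hypothetical numbers): for a curve with poes
# [0.9, 0.5, 0.1] at imls [0.01, 0.1, 1.0], the arrays are reversed so that
# numpy.interp sees increasing poes, and asking for poe=0.5 interpolates
# back to an intensity of roughly 0.1.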
def make_iml4(R, iml_disagg, imtls=None, poes_disagg=(None,), curves=()):
"""
:returns: an ArrayWrapper over a 4D array of shape (N, R, M, P)
"""
if imtls is None:
imtls = {imt: [iml] for imt, iml in iml_disagg.items()}
N = len(curves) or 1
M = len(imtls)
P = len(poes_disagg)
arr = numpy.zeros((N, R, M, P))
imts = [from_string(imt) for imt in imtls]
for m, imt in enumerate(imtls):
imls = imtls[imt]
for p, poe in enumerate(poes_disagg):
for r in range(R):
arr[:, r, m, p] = _imls(curves, poe, imt, imls, r)
return ArrayWrapper(arr, dict(poes_disagg=poes_disagg, imts=imts))
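# Illustrative sketch (not part of the library): with a fixed PGA level and
# no hazard curves, the wrapper degenerates to a single site:
#
#     >>> iml4 = make_iml4(2, {'PGA': 0.1})
#     >>> iml4.array.shape   # (N=1, R=2, M=1, P=1)
#     (1, 2, 1, 1)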
def collect_bin_data(sources, sitecol, cmaker, iml4,
truncation_level, n_epsilons, monitor=Monitor()):
"""
:param sources: a list of sources
:param sitecol: a SiteCollection instance
:param cmaker: a ContextMaker instance
:param iml4: an ArrayWrapper of intensities of shape (N, R, M, P)
:param truncation_level: the truncation level
:param n_epsilons: the number of epsilons
:param monitor: a Monitor instance
:returns: a dictionary (poe, imt, rlzi) -> probabilities of shape (N, E)
"""
# NB: instantiating truncnorm is slow and calls the infamous "doccer"
truncnorm = scipy.stats.truncnorm(-truncation_level, truncation_level)
epsilons = numpy.linspace(truncnorm.a, truncnorm.b, n_epsilons + 1)
acc = AccumDict(accum=[])
for source in sources:
with cmaker.ir_mon:
ruptures = list(source.iter_ruptures())
try:
acc += cmaker.disaggregate(
sitecol, ruptures, iml4, truncnorm, epsilons, monitor)
except Exception as err:
etype, err, tb = sys.exc_info()
msg = 'An error occurred with source id=%s. Error: %s'
msg %= (source.source_id, err)
raise_(etype, msg, tb)
return pack(acc, 'mags dists lons lats'.split())
def lon_lat_bins(bb, coord_bin_width):
"""
Define bin edges for disaggregation histograms.
Given bins data as provided by :func:`collect_bin_data`, this function
finds edges of histograms, taking into account maximum and minimum values
of magnitude, distance and coordinates as well as requested sizes/numbers
of bins.
"""
west, south, east, north = bb
west = numpy.floor(west / coord_bin_width) * coord_bin_width
east = numpy.ceil(east / coord_bin_width) * coord_bin_width
lon_extent = get_longitudinal_extent(west, east)
lon_bins, _, _ = npoints_between(
west, 0, 0, east, 0, 0,
numpy.round(lon_extent / coord_bin_width + 1))
lat_bins = coord_bin_width * numpy.arange(
int(numpy.floor(south / coord_bin_width)),
int(numpy.ceil(north / coord_bin_width) + 1))
return lon_bins, lat_bins
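# Illustrative sketch (approximate values): a one-degree bounding box with a
# half-degree bin width yields three edges per axis:
#
#     >>> lon_bins, lat_bins = lon_lat_bins((10.0, 45.0, 11.0, 46.0), 0.5)
#     >>> list(lat_bins)
#     [45.0, 45.5, 46.0]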
def get_shape(bin_edges, sid):
"""
:returns:
the shape of the disaggregation matrix for the given site, of form
(#mags-1, #dists-1, #lons-1, #lats-1, #eps-1)
"""
mag_bins, dist_bins, lon_bins, lat_bins, eps_bins = bin_edges
return (len(mag_bins) - 1, len(dist_bins) - 1,
len(lon_bins[sid]) - 1, len(lat_bins[sid]) - 1, len(eps_bins) - 1)
# this is fast
def build_disagg_matrix(bdata, bin_edges, sid, mon=Monitor):
"""
:param bdata: a dictionary of probabilities of no exceedance
:param bin_edges: bin edges
:param sid: site index
:param mon: a Monitor instance
:returns: a dictionary key -> matrix|pmf for each key in bdata
"""
with mon('build_disagg_matrix'):
mag_bins, dist_bins, lon_bins, lat_bins, eps_bins = bin_edges
dim1, dim2, dim3, dim4, dim5 = shape = get_shape(bin_edges, sid)
# find bin indexes of rupture attributes; bins are assumed closed
# on the lower bound, and open on the upper bound, that is [ )
# longitude values need an ad-hoc method to take into account
# the 'international date line' issue
# the 'minus 1' is needed because the digitize method returns the
# index of the upper bound of the bin
mags_idx = numpy.digitize(bdata.mags, mag_bins) - 1
dists_idx = numpy.digitize(bdata.dists[:, sid], dist_bins) - 1
lons_idx = _digitize_lons(bdata.lons[:, sid], lon_bins[sid])
lats_idx = numpy.digitize(bdata.lats[:, sid], lat_bins[sid]) - 1
# because of the way numpy.digitize works, values equal to the last bin
# edge are associated to an index equal to len(bins) which is not a
# valid index for the disaggregation matrix. Such values are assumed
# to fall in the last bin
mags_idx[mags_idx == dim1] = dim1 - 1
dists_idx[dists_idx == dim2] = dim2 - 1
lons_idx[lons_idx == dim3] = dim3 - 1
lats_idx[lats_idx == dim4] = dim4 - 1
out = {}
cache = {}
cache_hit = 0
num_zeros = 0
for k, allpnes in bdata.items():
pnes = allpnes[:, sid, :] # shape (U, N, E)
cache_key = pnes.sum()
if cache_key == pnes.size: # all pnes are 1
num_zeros += 1
continue # zero matrices are not transferred
try:
matrix = cache[cache_key]
cache_hit += 1
except KeyError:
mat = numpy.ones(shape)
for i_mag, i_dist, i_lon, i_lat, pne in zip(
mags_idx, dists_idx, lons_idx, lats_idx, pnes):
mat[i_mag, i_dist, i_lon, i_lat] *= pne
matrix = 1. - mat
cache[cache_key] = matrix
out[k] = matrix
# operations, hits, num_zeros
if hasattr(mon, 'cache_info'):
mon.cache_info += numpy.array([len(bdata), cache_hit, num_zeros])
else:
mon.cache_info = numpy.array([len(bdata), cache_hit, num_zeros])
return out
def _digitize_lons(lons, lon_bins):
"""
Return indices of the bins to which each value in lons belongs.
Takes into account the case in which longitude values cross the
international date line.
:parameter lons:
An instance of `numpy.ndarray`.
:parameter lons_bins:
An instance of `numpy.ndarray`.
"""
if cross_idl(lon_bins[0], lon_bins[-1]):
idx = numpy.zeros_like(lons, dtype=numpy.int)
for i_lon in range(len(lon_bins) - 1):
extents = get_longitudinal_extent(lons, lon_bins[i_lon + 1])
lon_idx = extents > 0
if i_lon != 0:
extents = get_longitudinal_extent(lon_bins[i_lon], lons)
lon_idx &= extents >= 0
idx[lon_idx] = i_lon
return numpy.array(idx)
else:
return numpy.digitize(lons, lon_bins) - 1
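# Illustrative sketch: away from the date line this is plain digitization,
# e.g. _digitize_lons(numpy.array([0.2, 0.8]), numpy.array([0., 0.5, 1.]))
# returns array([0, 1]).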
def disaggregation(
sources, site, imt, iml, gsim_by_trt, truncation_level,
n_epsilons, mag_bin_width, dist_bin_width, coord_bin_width,
source_filter=filters.source_site_noop_filter, filter_distance='rjb'):
"""
Compute "Disaggregation" matrix representing conditional probability of an
intensity measure type ``imt`` exceeding, at least once, an intensity
measure level ``iml`` at a geographical location ``site``, given rupture
scenarios classified in terms of:
- rupture magnitude
- Joyner-Boore distance from rupture surface to site
- longitude and latitude of the surface projection of a rupture's point
closest to ``site``
- epsilon: number of standard deviations by which an intensity measure
level deviates from the median value predicted by a GSIM, given the
rupture parameters
- rupture tectonic region type
In other words, the disaggregation matrix allows one to compute the
probability that each scenario with the specified properties (e.g.,
magnitude, or the magnitude and distance) causes one or more exceedances
of a given hazard level.
For more detailed information about the disaggregation, see for instance
"Disaggregation of Seismic Hazard", Paolo Bazzurro, C. Allin Cornell,
Bulletin of the Seismological Society of America, Vol. 89, pp. 501-520,
April 1999.
:param sources:
Seismic source model, as for
:mod:`PSHA <openquake.hazardlib.calc.hazard_curve>` calculator it
should be an iterator of seismic sources.
:param site:
:class:`~openquake.hazardlib.site.Site` of interest to calculate
disaggregation matrix for.
:param imt:
Instance of :mod:`intensity measure type <openquake.hazardlib.imt>`
class.
:param iml:
Intensity measure level. A float value in units of ``imt``.
:param gsim_by_trt:
Tectonic region type to GSIM objects mapping.
:param truncation_level:
Float, number of standard deviations for truncation of the intensity
distribution.
:param n_epsilons:
Integer number of epsilon histogram bins in the result matrix.
:param mag_bin_width:
Magnitude discretization step, width of one magnitude histogram bin.
:param dist_bin_width:
Distance histogram discretization step, in km.
:param coord_bin_width:
Longitude and latitude histograms discretization step,
in decimal degrees.
:param source_filter:
Optional source-site filter function. See
:mod:`openquake.hazardlib.calc.filters`.
:returns:
A tuple of two items. First is itself a tuple of bin edges information
for (in specified order) magnitude, distance, longitude, latitude,
epsilon and tectonic region types.
Second item is 6d-array representing the full disaggregation matrix.
Dimensions are in the same order as bin edges in the first item
of the result tuple. The matrix can be used directly by pmf-extractor
functions.
"""
trts = sorted(set(src.tectonic_region_type for src in sources))
trt_num = dict((trt, i) for i, trt in enumerate(trts))
rlzs_by_gsim = {gsim_by_trt[trt]: [0] for trt in trts}
cmaker = ContextMaker(rlzs_by_gsim, source_filter.integration_distance,
filter_distance)
iml4 = make_iml4(1, {str(imt): iml})
by_trt = groupby(sources, operator.attrgetter('tectonic_region_type'))
bdata = {}
sitecol = SiteCollection([site])
for trt, srcs in by_trt.items():
bdata[trt] = collect_bin_data(
srcs, sitecol, cmaker, iml4, truncation_level, n_epsilons)
if sum(len(bd.mags) for bd in bdata.values()) == 0:
warnings.warn(
'No ruptures have contributed to the hazard at site %s'
% site, RuntimeWarning)
return None, None
min_mag = min(bd.mags.min() for bd in bdata.values())
max_mag = max(bd.mags.max() for bd in bdata.values())
mag_bins = mag_bin_width * numpy.arange(
int(numpy.floor(min_mag / mag_bin_width)),
int(numpy.ceil(max_mag / mag_bin_width) + 1))
min_dist = min(bd.dists.min() for bd in bdata.values())
max_dist = max(bd.dists.max() for bd in bdata.values())
dist_bins = dist_bin_width * numpy.arange(
int(numpy.floor(min_dist / dist_bin_width)),
int(numpy.ceil(max_dist / dist_bin_width) + 1))
bb = (min(bd.lons.min() for bd in bdata.values()),
min(bd.lats.min() for bd in bdata.values()),
max(bd.lons.max() for bd in bdata.values()),
max(bd.lats.max() for bd in bdata.values()))
lon_bins, lat_bins = lon_lat_bins(bb, coord_bin_width)
eps_bins = numpy.linspace(-truncation_level, truncation_level,
n_epsilons + 1)
bin_edges = (mag_bins, dist_bins, [lon_bins], [lat_bins], eps_bins)
matrix = numpy.zeros((len(mag_bins) - 1, len(dist_bins) - 1,
len(lon_bins) - 1, len(lat_bins) - 1,
len(eps_bins) - 1, len(trts)))
for trt in bdata:
dic = build_disagg_matrix(bdata[trt], bin_edges, sid=0)
if dic: # (poe, imt, rlzi) -> matrix
[mat] = dic.values()
matrix[..., trt_num[trt]] = mat
return bin_edges + (trts,), matrix
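# Illustrative sketch (argument values are placeholders, not a tested
# configuration; PGA comes from openquake.hazardlib.imt):
#
#     bin_edges, matrix = disaggregation(
#         sources, site, PGA(), 0.1, {'Active Shallow Crust': gsim},
#         truncation_level=3, n_epsilons=6, mag_bin_width=0.5,
#         dist_bin_width=10., coord_bin_width=0.5)
#     mag_bins, dist_bins, lon_bins, lat_bins, eps_bins, trts = bin_edges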
def mag_pmf(matrix):
"""
Fold full disaggregation matrix to magnitude PMF.
:returns:
1d array, a histogram representing magnitude PMF.
"""
nmags, ndists, nlons, nlats, neps = matrix.shape
mag_pmf = numpy.zeros(nmags)
for i in range(nmags):
mag_pmf[i] = numpy.prod(
[1. - matrix[i, j, k, l, m]
for j in range(ndists)
for k in range(nlons)
for l in range(nlats)
for m in range(neps)])
return 1. - mag_pmf
def dist_pmf(matrix):
"""
Fold full disaggregation matrix to distance PMF.
:returns:
1d array, a histogram representing distance PMF.
"""
nmags, ndists, nlons, nlats, neps = matrix.shape
dist_pmf = numpy.zeros(ndists)
for j in range(ndists):
dist_pmf[j] = numpy.prod(
[1. - matrix[i, j, k, l, m]
for i in range(nmags)
for k in range(nlons)
for l in range(nlats)
for m in range(neps)])
return 1. - dist_pmf
def trt_pmf(matrices):
"""
Fold full disaggregation matrix to tectonic region type PMF.
:param matrices:
a matrix with T submatrices
:returns:
an array of T probabilities one per each tectonic region type
"""
ntrts, nmags, ndists, nlons, nlats, neps = matrices.shape
pmf = numpy.zeros(ntrts)
for t in range(ntrts):
pmf[t] = 1. - numpy.prod(
[1. - matrices[t, i, j, k, l, m]
for i in range(nmags)
for j in range(ndists)
for k in range(nlons)
for l in range(nlats)
for m in range(neps)])
return pmf
def mag_dist_pmf(matrix):
"""
Fold full disaggregation matrix to magnitude / distance PMF.
:returns:
2d array. First dimension represents magnitude histogram bins,
second one -- distance histogram bins.
"""
nmags, ndists, nlons, nlats, neps = matrix.shape
mag_dist_pmf = numpy.zeros((nmags, ndists))
for i in range(nmags):
for j in range(ndists):
mag_dist_pmf[i, j] = numpy.prod(
[1. - matrix[i, j, k, l, m]
for k in range(nlons)
for l in range(nlats)
for m in range(neps)])
return 1. - mag_dist_pmf
def mag_dist_eps_pmf(matrix):
"""
Fold full disaggregation matrix to magnitude / distance / epsilon PMF.
:returns:
3d array. First dimension represents magnitude histogram bins,
second one -- distance histogram bins, third one -- epsilon
histogram bins.
"""
nmags, ndists, nlons, nlats, neps = matrix.shape
mag_dist_eps_pmf = numpy.zeros((nmags, ndists, neps))
for i in range(nmags):
for j in range(ndists):
for m in range(neps):
mag_dist_eps_pmf[i, j, m] = numpy.prod(
[1. - matrix[i, j, k, l, m]
for k in range(nlons)
for l in range(nlats)])
return 1. - mag_dist_eps_pmf
def lon_lat_pmf(matrix):
"""
Fold full disaggregation matrix to longitude / latitude PMF.
:returns:
2d array. First dimension represents longitude histogram bins,
second one -- latitude histogram bins.
"""
nmags, ndists, nlons, nlats, neps = matrix.shape
lon_lat_pmf = numpy.zeros((nlons, nlats))
for k in range(nlons):
for l in range(nlats):
lon_lat_pmf[k, l] = numpy.prod(
[1. - matrix[i, j, k, l, m]
for i in range(nmags)
for j in range(ndists)
for m in range(neps)])
return 1. - lon_lat_pmf
def lon_lat_trt_pmf(matrices):
"""
Fold full disaggregation matrices to lon / lat / TRT PMF.
:param matrices:
a matrix with T submatrices
:returns:
3d array. First dimension represents longitude histogram bins,
second one latitude histogram bins, third one trt histogram bins.
"""
res = numpy.array([lon_lat_pmf(mat) for mat in matrices])
return res.transpose(1, 2, 0)
def mag_lon_lat_pmf(matrix):
"""
Fold full disaggregation matrix to magnitude / longitude / latitude PMF.
:returns:
3d array. First dimension represents magnitude histogram bins,
second one -- longitude histogram bins, third one -- latitude
histogram bins.
"""
nmags, ndists, nlons, nlats, neps = matrix.shape
mag_lon_lat_pmf = numpy.zeros((nmags, nlons, nlats))
for i in range(nmags):
for k in range(nlons):
for l in range(nlats):
mag_lon_lat_pmf[i, k, l] = numpy.prod(
[1. - matrix[i, j, k, l, m]
for j in range(ndists)
for m in range(neps)])
return 1. - mag_lon_lat_pmf
# this dictionary is useful to extract a fixed set of
# submatrices from the full disaggregation matrix
pmf_map = collections.OrderedDict([
('Mag', mag_pmf),
('Dist', dist_pmf),
('TRT', trt_pmf),
('Mag_Dist', mag_dist_pmf),
('Mag_Dist_Eps', mag_dist_eps_pmf),
('Lon_Lat', lon_lat_pmf),
('Mag_Lon_Lat', mag_lon_lat_pmf),
('Lon_Lat_TRT', lon_lat_trt_pmf),
])
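# Illustrative sketch: given the full matrix returned by disaggregation()
# above, a magnitude/distance PMF for the first tectonic region type could
# be extracted with
#
#     mag_dist = pmf_map['Mag_Dist'](matrix[..., 0])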
| [
"[email protected]"
] | |
dbf428402a937cb09a98af44a2e148b105a3368f | 77c641fd0708b279dddbe01f6af32a8531b93185 | /marketsim/gen/_out/math/Moving/_Min.py | 7cde0c4e008cb74439117ad736967208c24d45c3 | [] | no_license | abensrhir/marketsimulator | aea286afd2bb2e0c8a547bfa879601aef21c0cd5 | f9f55c72fb34cdbec42b96737ca20839f26c6299 | refs/heads/master | 2020-12-13T20:55:55.795344 | 2014-02-24T22:52:24 | 2014-02-24T22:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,660 | py | from marketsim import registry
from marketsim.gen._out._observable import Observablefloat
from marketsim.gen._intrinsic.observable.minmax import Min_Impl
from marketsim.gen._out._iobservable import IObservablefloat
@registry.expose(["Statistics", "Min"])
class Min_IObservableFloatFloat(Observablefloat,Min_Impl):
"""
"""
def __init__(self, source = None, timeframe = None):
from marketsim.gen._out._observable import Observablefloat
from marketsim.gen._out._const import const_Float as _const_Float
from marketsim import event
from marketsim import rtti
Observablefloat.__init__(self)
self.source = source if source is not None else _const_Float(1.0)
event.subscribe(self.source, self.fire, self)
self.timeframe = timeframe if timeframe is not None else 100.0
rtti.check_fields(self)
Min_Impl.__init__(self)
@property
def label(self):
return repr(self)
_properties = {
'source' : IObservablefloat,
'timeframe' : float
}
def __repr__(self):
return "Min_{n=%(timeframe)s}(%(source)s)" % self.__dict__
def Min(source = None,timeframe = None):
from marketsim.gen._out._iobservable import IObservablefloat
from marketsim import rtti
if source is None or rtti.can_be_casted(source, IObservablefloat):
if timeframe is None or rtti.can_be_casted(timeframe, float):
return Min_IObservableFloatFloat(source,timeframe)
raise Exception('Cannot find suitable overload for Min('+str(source) +':'+ str(type(source))+','+str(timeframe) +':'+ str(type(timeframe))+')')
| [
"[email protected]"
] | |
6e06a75c02bd30795563b83eb66fd76197ba0f57 | ac5cba0f382ff833e215b3aec164cd70ce86572a | /tests/controllers/test_validation.py | f8c5d107be8ceca233d8c863cbf0a8a6a75545a4 | [] | no_license | numberoverzero/moldyboot | c724141d4db6ec1dc7be550cfb95de135ea19ac0 | 10bec1e76ddb6c9f8d826936056eb7730b64bdd7 | refs/heads/master | 2021-09-10T15:58:26.137509 | 2017-11-27T08:58:19 | 2017-11-27T08:58:19 | 60,145,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,063 | py | import base64
import uuid
import bcrypt
import pytest
from cryptography.hazmat.primitives import serialization
from tests.helpers import as_der
from moldyboot.controllers import InvalidParameter, validate
from moldyboot.security.jwk import i2b64
valid_uuids = [
uuid.uuid1(),
uuid.uuid4()
]
invalid_uuids = [
None,
"",
"not a uuid",
]
valid_usernames = ["abc", "aaa", "a00"]
invalid_usernames = ["", "aa", "ab!", "0ab"]
invalid_emails = ["", "a@", "@a", "aaa"]
valid_emails = ["a@c", "!@!", "@@@"]
invalid_signatures = [
"",
# missing sections
'''Signature headers="" id="@"''',
'''Signature headers="" signature=""''',
'''Signature id="@" signature=""''',
# out of order
'''Signature id="@" headers="" signature=""''',
# capitalization
'''Signature HEADERS="" ID="@" SIGNATURE=""''',
# quote style
"""Signature headers='' id='@' signature=''""",
# bad id
'''Signature headers="" id="" signature=""''',
# extra whitespace
''' Signature headers="" id="@" signature=""''',
'''Signature headers="" id="@" signature=""''',
'''Signature headers="" id="@" signature="" '''
]
def test_validate_unknown_parameter():
with pytest.raises(KeyError):
validate("not a real parameter name", "unused value")
@pytest.mark.parametrize("parameter_name", ["user_id", "key_id", "verification_code"])
@pytest.mark.parametrize("valid_uuid", valid_uuids)
def test_valid_uuid(parameter_name, valid_uuid):
same = validate(parameter_name, valid_uuid)
also_same = validate(parameter_name, str(valid_uuid))
assert valid_uuid == same == also_same
@pytest.mark.parametrize("parameter_name", ["user_id", "key_id"])
@pytest.mark.parametrize("invalid_uuid", invalid_uuids)
def test_invalid_uuid(parameter_name, invalid_uuid):
with pytest.raises(InvalidParameter) as excinfo:
validate(parameter_name, invalid_uuid)
exception = excinfo.value
assert parameter_name == exception.parameter_name
assert invalid_uuid == exception.value
assert "must be a UUID" == exception.message
@pytest.mark.parametrize("invalid_signature", invalid_signatures)
def test_invalid_authorization_header(invalid_signature):
with pytest.raises(InvalidParameter) as excinfo:
validate("authorization_header", invalid_signature)
assert "authorization_header" == excinfo.value.parameter_name
assert invalid_signature == excinfo.value.value
def test_valid_authorization_header():
valid = '''Signature headers="a" id="b@c" signature="d"'''
expected = {
"headers": "a",
"user_id": "b",
"key_id": "c",
"signature": "d"}
actual = validate("authorization_header", valid)
assert actual == expected
@pytest.mark.parametrize("valid_email", valid_emails)
def test_valid_email(valid_email):
assert validate("email", valid_email) == valid_email
@pytest.mark.parametrize("invalid_email", invalid_emails)
def test_invalid_email(invalid_email):
with pytest.raises(InvalidParameter) as excinfo:
validate("email", invalid_email)
assert "email" == excinfo.value.parameter_name
@pytest.mark.parametrize("valid_username", valid_usernames)
def test_valid_username(valid_username):
assert validate("username", valid_username) == valid_username
@pytest.mark.parametrize("invalid_username", invalid_usernames)
def test_invalid_username(invalid_username):
with pytest.raises(InvalidParameter) as excinfo:
validate("username", invalid_username)
assert "username" == excinfo.value.parameter_name
def test_valid_public_key(rsa_pub):
valid_keys = [
# RSAPublicKey
rsa_pub,
# DER
rsa_pub.public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo
),
rsa_pub.public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.PKCS1
),
# PEM
rsa_pub.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
),
rsa_pub.public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.PKCS1
),
rsa_pub.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
).decode("utf-8"),
# JWK
{
"n": i2b64(rsa_pub.public_numbers().n),
"e": i2b64(rsa_pub.public_numbers().e)
}
]
for valid_key in valid_keys:
validated = validate("public_key", valid_key)
assert as_der(validated) == as_der(rsa_pub)
def test_invalid_public_key(rsa_pub):
# base64 of DER encoding fails (just use PEM)
encoded_bytes = base64.b64encode(as_der(rsa_pub))
invalid_keys = [
encoded_bytes,
encoded_bytes.decode("utf-8"), # as string
"",
b""
]
for invalid_key in invalid_keys:
with pytest.raises(InvalidParameter) as excinfo:
validate("public_key", invalid_key)
assert "public_key" == excinfo.value.parameter_name
def test_valid_password_hash():
hash = bcrypt.hashpw(b"hunter2", bcrypt.gensalt(4))
assert hash == validate("password_hash", hash)
assert hash == validate("password_hash", hash.decode("utf-8"))
def test_invalid_password_hash():
invalid_hashes = [
"$2a$06$" + "a"*53, # Wrong type (2a, not 2b)
"$2b$aa$" + "a"*53, # rounds must be decimals
"$2b$06$" + "a"*52, # Wrong salt+hash length
"$2b$o6$" + "a"*54, # Wrong salt+hash length
"$2b$o6$" + "?"*53, # Invalid base64 character
"$2b$o6$" + "+"*53, # Nonstandard b64 doesn't include +
]
for invalid_hash in invalid_hashes:
with pytest.raises(InvalidParameter) as excinfo:
validate("password_hash", invalid_hash)
assert "password_hash" == excinfo.value.parameter_name
| [
"[email protected]"
] | |
7dd73e917d89ec04f027cae79d65301d6b7c2939 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2390/60737/268647.py | 2acafd5ca353caea1d158f69e30a7ae788966398 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | n=int(input())
src=[int(x) for x in input().split()]
ans = 0
def swap_in_array(x,y,k):
for i in range(2**k):
src[x+i],src[y+i]=src[y+i],src[x+i]
def fac(x):
if x<=1:
return 1
return x*fac(x-1)
def judge(start,kind):
for i in range(1,2**kind):
if src[start+i]!=src[start+i-1]+1:
return True
return False
def dfs(kind,before):
global ans
if kind == n + 1:
ans += fac(before)
return
opt1, opt2 = -1, -1
l=2**(kind-1)
for i in range(0,2**n,2*l):
if judge(i,kind):
if opt1==-1:
opt1=i
elif opt2==-1:
opt2=i
else:
break
if opt1==-1 and opt2==-1:
dfs(kind+1,before)
return
elif opt1!=-1 and opt2==-1:
swap_in_array(opt1,opt1+l,kind-1)
dfs(kind+1,before+1)
swap_in_array(opt1, opt1 + l, kind - 1)
elif opt1!=-1 and opt2!=-1:
for i in range(0,l+1,l):
for j in range(0,l+1,l):
swap_in_array(opt1+i,opt2+j,kind-1)
if not judge(opt1,kind) and not judge(opt2,kind):
dfs(kind+1,before+1)
swap_in_array(opt1 + i, opt2 + j, kind-1)
break
swap_in_array(opt1 + i, opt2 + j, kind-1)
dfs(1,0)
print(ans)
| [
"[email protected]"
] | |
a9bbe242539826333f6424970c7f1b684aafed56 | 1974b3e9c5f2f677833e1608a41281f377fd331c | /dltesthttp_xuyalin2/www/testcase/webservice/ts_ws_approval/getApprovalList.py | 204c3767506929d7fc4d05dceab08d9873afb545 | [] | no_license | xyl00755/pythonLearning | ed0f540b61247c3560f347853da5886b2e2ba25d | c6aecff86ff34dcd7358d98201627ff84e9bf2cf | refs/heads/master | 2021-01-13T08:19:25.171016 | 2016-12-16T05:43:10 | 2016-12-16T05:43:10 | 71,764,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,436 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from www.api.webservice import *
from www.common.database import *
from www.common.excel import *
"""
0033. Dealer administrator fetches the approval list of "My Danlu" terminal stores
http://127.0.0.1:8280/mallws/mydl/approval/getApprovalList.json
{
    "token":"123",          // required
    "approvalStatus":"0",   // required; approval status: 0 = pending, 1 = approved
    "page":"1",             // required; page number
    "rows":"15"             // required; rows per page
}
{
    "code": 200,
    "description": "执行成功!",
    "model": {
        "success": "0",     // success flag: 0 = success
        "approvalList": [
            {
                "approvalId": "TAL9807720151000463",  // approval id
                "taskId": "",                         // workflow taskId
                "approvalStatus": "2",                // approval status: 0 = pending, 1 = approved, 2 = rejected
                "terminalLoginName": "s",             // terminal store login name
                "terminalFullName": "s",              // terminal store name
                "terminalFullAddr": "辽宁大连市西岗区黄浦路977号"  // terminal store full address
            }
        ]
    },
    "metadata": {
        "type": 0,
        "clazz": "cn.com.hd.mall.web.webservices.entity.response.mydl.approval.ApprovalListResponse"
    }
}
Parameter validation:
    approvalStatus @NotNull @Pattern(regexp = "0|1")
Response codes:
    100 = token expired, 200 = success, 300 = wrong role (no permission), 400 = invalid parameter, 500 = server error, 600 = log in again
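A raw call to the endpoint documented above (the token value is a placeholder):
    curl -X POST http://127.0.0.1:8280/mallws/mydl/approval/getApprovalList.json \
         -H 'Content-Type: application/json' \
         -d '{"token":"<token>","approvalStatus":"0","page":"1","rows":"15"}'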
"""
class getApprovalList(unittest.TestCase):
UserShop=wsData('DealMager')
    # dealer salesperson
    UserShop2=wsData('DealSaler')
    # dealer purchaser
    UserShop6=wsData('DealBuyer')
    # dealer delivery staff
    UserShop4=wsData('DealSeder')
    # dealer finance staff
UserShop5=wsData('DealFiner')
UserShop3 = wsData('RegistTmlShop')
def setUp(self):
applyId=select_one('select apply_id from dlworkflow.dl_apply_terminal where terminal_tel=?',self.UserShop3.registerTel)
if applyId!=None:
update('delete from dlworkflow.act_hi_procinst where BUSINESS_KEY_=?',str(applyId.apply_id))
update('delete from dlworkflow.dl_apply_terminal where terminal_tel=?',self.UserShop3.registerTel)
    # Fetch the pending-approval list correctly (no pending records exist yet)
def test_getApprovalList(self):
ws=webservice()
ws.login(self.UserShop.username,self.UserShop.password)
getList=ws.getApprovalList(approvalStatus='0',page='1',rows='15')
self.assertEqual(getList.model['success'],'0')
self.assertEqual(getList.model['approvalList'],[])
    # Fetch the pending-approval list correctly (a pending record exists)
def test_getApprovalList_existApprove(self):
ws=webservice()
ws.login(self.UserShop.username,self.UserShop.password)
        # register one record that is pending approval
tmlRegist=ws.terminalRegistApprove(terminalLoginName=self.UserShop3.username,password=self.UserShop3.password,registerTel=self.UserShop3.registerTel,verificationCode='1111',invitationCode=self.UserShop.invitationCode,
terminalFullName=self.UserShop3.fullName,businessLicenseCode=self.UserShop3.busLicenseNum,storeTypeCode=self.UserShop3.storeTypeCode,terminalAreaProvinceCode=self.UserShop3.areaProvinceCode,
terminalAreaCityCode=self.UserShop3.areaCityCode,terminalAreaDistrictCode=self.UserShop3.areaDistrictCode,terminalAddress=self.UserShop3.localStreet)
self.assertEqual(tmlRegist.model['success'],'0')
self.assertEqual(tmlRegist.model['checkResult'],None)
getList=ws.getApprovalList(approvalStatus='0',page='1',rows='15')
self.assertEqual(getList.model['success'],'0')
self.assertEqual(getList.model['approvalList'][0]['terminalLoginName'],self.UserShop3.username)
self.assertEqual(getList.model['approvalList'][0]['terminalFullName'],self.UserShop3.fullName)
self.assertEqual(getList.model['approvalList'][0]['terminalFullAddr'],self.UserShop3.localProvince+'-'+self.UserShop3.localCity+'-'+self.UserShop3.localCountry+'-'+self.UserShop3.localStreet)
approvid=getList.model['approvalList'][0]['approvalId']
update('delete from dlworkflow.act_hi_procinst where BUSINESS_KEY_=?',approvid)
    # Fetch the approved list correctly (no approved records exist yet)
def test_getApprovalList_notExistRefuse(self):
ws=webservice()
ws.login(self.UserShop.username,self.UserShop.password)
        # register one record that is pending approval
tmlRegist=ws.terminalRegistApprove(terminalLoginName=self.UserShop3.username,password=self.UserShop3.password,registerTel=self.UserShop3.registerTel,verificationCode='1111',invitationCode=self.UserShop.invitationCode,
terminalFullName=self.UserShop3.fullName,businessLicenseCode=self.UserShop3.busLicenseNum,storeTypeCode=self.UserShop3.storeTypeCode,terminalAreaProvinceCode=self.UserShop3.areaProvinceCode,
terminalAreaCityCode=self.UserShop3.areaCityCode,terminalAreaDistrictCode=self.UserShop3.areaDistrictCode,terminalAddress=self.UserShop3.localStreet)
self.assertEqual(tmlRegist.model['success'],'0')
self.assertEqual(tmlRegist.model['checkResult'],None)
getList=ws.getApprovalList(approvalStatus='1',page='1',rows='15')
self.assertEqual(getList.model['success'],'0')
self.assertEqual(getList.model['approvalList'],[])
getList2=ws.getApprovalList(approvalStatus='0',page='1',rows='15')
approvid=getList2.model['approvalList'][0]['approvalId']
update('delete from dlworkflow.act_hi_procinst where BUSINESS_KEY_=?',approvid)
    # Fetch the approved list correctly (an approved record exists)
def test_getApprovalList_existRefuse(self):
ws=webservice()
ws.login(self.UserShop.username,self.UserShop.password)
        # register one record that is pending approval
tmlRegist=ws.terminalRegistApprove(terminalLoginName=self.UserShop3.username,password=self.UserShop3.password,registerTel=self.UserShop3.registerTel,verificationCode='1111',invitationCode=self.UserShop.invitationCode,
terminalFullName=self.UserShop3.fullName,businessLicenseCode=self.UserShop3.busLicenseNum,storeTypeCode=self.UserShop3.storeTypeCode,terminalAreaProvinceCode=self.UserShop3.areaProvinceCode,
terminalAreaCityCode=self.UserShop3.areaCityCode,terminalAreaDistrictCode=self.UserShop3.areaDistrictCode,terminalAddress=self.UserShop3.localStreet)
self.assertEqual(tmlRegist.model['success'],'0')
self.assertEqual(tmlRegist.model['checkResult'],None)
getList=ws.getApprovalList(approvalStatus='0',page='1',rows='1')
approvid=getList.model['approvalList'][0]['approvalId']
taskid=getList.model['approvalList'][0]['taskId']
audit=ws.auditApproval(approvalId=approvid,taskId=taskid,auditStatus='1',approvalReason='拒绝该终端店注册成功!')
self.assertEqual(audit.model['success'],'0')
getList=ws.getApprovalList(approvalStatus='1',page='1',rows='15')
self.assertEqual(getList.model['success'],'0')
self.assertEqual(getList.model['approvalList'][0]['approvalStatus'],'2')
self.assertEqual(getList.model['approvalList'][0]['terminalLoginName'],self.UserShop3.username)
self.assertEqual(getList.model['approvalList'][0]['terminalFullName'],self.UserShop3.fullName)
self.assertEqual(getList.model['approvalList'][0]['terminalFullAddr'],self.UserShop3.localProvince+'-'+self.UserShop3.localCity+'-'+self.UserShop3.localCountry+'-'+self.UserShop3.localStreet)
update('delete from dlworkflow.act_hi_procinst where BUSINESS_KEY_=?',approvid)
    # salesperson role logs in and fetches the pending-approval list
def test_getApprovalList_saler(self):
ws=webservice()
ws.login(self.UserShop2.username,self.UserShop2.password)
getList=ws.getApprovalList(approvalStatus='0',page='1',rows='15')
self.assertEqual(getList.code,300)
self.assertEqual(getList.description,'错误的权限!')
    # purchaser role logs in and fetches the pending-approval list
def test_getApprovalList_buyer(self):
ws=webservice()
ws.login(self.UserShop6.username,self.UserShop6.password)
getList=ws.getApprovalList(approvalStatus='0',page='1',rows='15')
self.assertEqual(getList.code,300)
self.assertEqual(getList.description,'错误的权限!')
    # delivery role logs in and fetches the pending-approval list
def test_getApprovalList_seder(self):
ws=webservice()
ws.login(self.UserShop4.username,self.UserShop4.password)
getList=ws.getApprovalList(approvalStatus='0',page='1',rows='15')
self.assertEqual(getList.code,300)
self.assertEqual(getList.description,'错误的权限!')
    # finance role logs in and fetches the pending-approval list
def test_getApprovalList_finer(self):
ws=webservice()
ws.login(self.UserShop5.username,self.UserShop5.password)
getList=ws.getApprovalList(approvalStatus='0',page='1',rows='15')
self.assertEqual(getList.code,300)
self.assertEqual(getList.description,'错误的权限!')
    # Verify paging works (one row per page: page 1 returns the record, page 2 returns none)
def test_getApprovalList_page(self):
ws=webservice()
ws.login(self.UserShop.username,self.UserShop.password)
        # register one record that is pending approval
tmlRegist=ws.terminalRegistApprove(terminalLoginName=self.UserShop3.username,password=self.UserShop3.password,registerTel=self.UserShop3.registerTel,verificationCode='1111',invitationCode=self.UserShop.invitationCode,
terminalFullName=self.UserShop3.fullName,businessLicenseCode=self.UserShop3.busLicenseNum,storeTypeCode=self.UserShop3.storeTypeCode,terminalAreaProvinceCode=self.UserShop3.areaProvinceCode,
terminalAreaCityCode=self.UserShop3.areaCityCode,terminalAreaDistrictCode=self.UserShop3.areaDistrictCode,terminalAddress=self.UserShop3.localStreet)
self.assertEqual(tmlRegist.model['success'],'0')
self.assertEqual(tmlRegist.model['checkResult'],None)
getList1=ws.getApprovalList(approvalStatus='0',page='1',rows='1')
self.assertEqual(getList1.model['success'],'0')
approvid=getList1.model['approvalList'][0]['approvalId']
self.assertEqual(getList1.model['approvalList'][0]['terminalLoginName'],self.UserShop3.username)
self.assertEqual(getList1.model['approvalList'][0]['terminalFullName'],self.UserShop3.fullName)
self.assertEqual(getList1.model['approvalList'][0]['terminalFullAddr'],self.UserShop3.localProvince+'-'+self.UserShop3.localCity+'-'+self.UserShop3.localCountry+'-'+self.UserShop3.localStreet)
getList2=ws.getApprovalList(approvalStatus='0',page='2',rows='1')
self.assertEqual(getList2.model['approvalList'],[])
update('delete from dlworkflow.act_hi_procinst where BUSINESS_KEY_=?',approvid)
def tearDown(self):
applyId=select_one('select apply_id from dlworkflow.dl_apply_terminal where terminal_tel=?',self.UserShop3.registerTel)
if applyId!=None:
update('delete from dlworkflow.act_hi_procinst where BUSINESS_KEY_=?',str(applyId.apply_id))
update('delete from dlworkflow.dl_apply_terminal where terminal_tel=?',self.UserShop3.registerTel)
def suite():
suite=unittest.TestSuite()
suite.addTest(getApprovalList("test_getApprovalList"))
suite.addTest(getApprovalList("test_getApprovalList_existApprove"))
suite.addTest(getApprovalList("test_getApprovalList_existRefuse"))
suite.addTest(getApprovalList("test_getApprovalList_notExistRefuse"))
suite.addTest(getApprovalList("test_getApprovalList_saler"))
suite.addTest(getApprovalList("test_getApprovalList_buyer"))
suite.addTest(getApprovalList("test_getApprovalList_seder"))
suite.addTest(getApprovalList("test_getApprovalList_finer"))
suite.addTest(getApprovalList("test_getApprovalList_page"))
return suite
| [
"[email protected]"
] | |
b98315d1a5ab6145cfe5f1ac344a84defb8be0b5 | 8f1c3c76bf8514818b733ba29fe575d8a5243add | /eduerp_facility/models/facility_line.py | 88d78e63223b5ccf9c0bb05ec4633ea3fbb65e53 | [
"Apache-2.0"
] | permissive | westlyou/eduerp | 27f1c7dcd0d2badf50cb6c69f5e761d7f0c6a898 | 968d79b5adc729bc81192604f1fc223517d38ccf | refs/heads/master | 2021-06-04T05:11:13.858246 | 2016-09-12T07:21:17 | 2016-09-12T07:21:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # -*- coding: utf-8 -*-
###############################################################################
#
###############################################################################
from openerp import models, fields, api
from openerp.exceptions import ValidationError
class OpFacilityLine(models.Model):
_name = 'op.facility.line'
_rec_name = 'facility_id'
facility_id = fields.Many2one('op.facility', 'Facility', required=True)
quantity = fields.Float('Quantity', required=True)
    @api.constrains('quantity')
    def check_quantity(self):
        # constraint methods receive the full recordset of changed records
        for rec in self:
            if rec.quantity <= 0.0:
                raise ValidationError("Enter proper Quantity in Facilities!")
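    # Example (sketch, assuming a configured Odoo environment bound to `env`):
    #   env['op.facility.line'].create({'facility_id': 1, 'quantity': 0.0})
    #   # -> raises ValidationError("Enter proper Quantity in Facilities!")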
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
9943e93a01401ca1cc7c610830d49e72c444c77f | 59880d47a533cf1f45f927adafff22d5ffb4796a | /Python/python_fundamentals/make_read_dictionary.py | 725d1d1fa73aa6b64157ebfd126bea054281fcdc | [] | no_license | mightymcalpine/DojoAssignments | 2bc7bb791630040dbb62da917a26b74bbdd574e4 | 9c0d80953f6ddbe840314f3d333b5f4590e0c9f4 | refs/heads/master | 2021-01-18T00:07:07.128554 | 2017-06-05T16:38:35 | 2017-06-05T16:38:35 | 84,257,743 | 0 | 0 | null | 2017-06-02T05:34:36 | 2017-03-07T23:47:27 | Python | UTF-8 | Python | false | false | 268 | py | mclp = {
'name': 'Lars',
'age': 35,
'origin': 'USA',
'lang': 'Python'
}
def aboutMe(obj):
print 'My name is', obj['name']
print 'My age is', obj['age']
print 'My country of origin is', obj['origin']
print 'My favorite language is', obj['lang']
aboutMe(mclp)
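# Running this script prints:
#   My name is Lars
#   My age is 35
#   My country of origin is USA
#   My favorite language is Python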
| [
"[email protected]"
] | |
2d067b2ac1695d0e8ac6f1a1a6161669c6fdea99 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /CsvdwQvNe8hYomcwB_11.py | 6927a3aaa072012090d89ad2c5a0818099ffc0f4 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | """
Create a function similar to Processings "map" function (check the
**Resources** tab), in which a value and its range is taken and remapped to a
new range.
The function takes 5 numbers:
* Value: `value`
* Range: `low1` and `high1`
* Range: `low2` and `high2`
### Examples
remap(7, 2, 12, 0, 100) ➞ 50
remap(17, 5, 55, 100, 30) ➞ 83.2
remap(50, 1, 51, 0, 100) ➞ 98
### Notes
* Test input will always be numbers.
* If the input range is `0`, return `0`.
"""
def remap(value, low1, high1, low2, high2):
    # Degenerate input range: the spec says to return 0.
    if high1 - low1 == 0:
        return 0
    # Standard linear interpolation onto the target range (also avoids the
    # original's division by zero when value == low1 and its dropped low2 offset).
    return low2 + (value - low1) * (high2 - low2) / (high1 - low1)
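# Sanity checks against the documented examples:
#   remap(7, 2, 12, 0, 100)   -> 50.0
#   remap(17, 5, 55, 100, 30) -> 83.2
#   remap(50, 1, 51, 0, 100)  -> 98.0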
| [
"[email protected]"
] | |
20a7a8a8bd2ed8e48b7e48d46e2173c988a07490 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py | 7fa8b77d63648c814d1199051fd25f8268c9e5b3 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 2,860 | py | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..registration import ExpertAutomatedRegistration
def test_ExpertAutomatedRegistration_inputs():
input_map = dict(
affineMaxIterations=dict(argstr='--affineMaxIterations %d', ),
affineSamplingRatio=dict(argstr='--affineSamplingRatio %f', ),
args=dict(argstr='%s', ),
bsplineMaxIterations=dict(argstr='--bsplineMaxIterations %d', ),
bsplineSamplingRatio=dict(argstr='--bsplineSamplingRatio %f', ),
controlPointSpacing=dict(argstr='--controlPointSpacing %d', ),
environ=dict(
nohash=True,
usedefault=True,
),
expectedOffset=dict(argstr='--expectedOffset %f', ),
expectedRotation=dict(argstr='--expectedRotation %f', ),
expectedScale=dict(argstr='--expectedScale %f', ),
expectedSkew=dict(argstr='--expectedSkew %f', ),
fixedImage=dict(
argstr='%s',
position=-2,
),
fixedImageMask=dict(argstr='--fixedImageMask %s', ),
fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ),
initialization=dict(argstr='--initialization %s', ),
interpolation=dict(argstr='--interpolation %s', ),
loadTransform=dict(argstr='--loadTransform %s', ),
metric=dict(argstr='--metric %s', ),
minimizeMemory=dict(argstr='--minimizeMemory ', ),
movingImage=dict(
argstr='%s',
position=-1,
),
movingLandmarks=dict(argstr='--movingLandmarks %s...', ),
numberOfThreads=dict(argstr='--numberOfThreads %d', ),
randomNumberSeed=dict(argstr='--randomNumberSeed %d', ),
registration=dict(argstr='--registration %s', ),
resampledImage=dict(
argstr='--resampledImage %s',
hash_files=False,
),
rigidMaxIterations=dict(argstr='--rigidMaxIterations %d', ),
rigidSamplingRatio=dict(argstr='--rigidSamplingRatio %f', ),
sampleFromOverlap=dict(argstr='--sampleFromOverlap ', ),
saveTransform=dict(
argstr='--saveTransform %s',
hash_files=False,
),
verbosityLevel=dict(argstr='--verbosityLevel %s', ),
)
inputs = ExpertAutomatedRegistration.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ExpertAutomatedRegistration_outputs():
output_map = dict(
resampledImage=dict(),
saveTransform=dict(),
)
outputs = ExpertAutomatedRegistration.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| [
"[email protected]"
] | |
01df3b50b123410aa99419c69bd77ff08103c28a | 12f664c45e338772832ce8a65213f12ee59451f6 | /devel/lib/python2.7/dist-packages/naoqi_bridge_msgs/msg/_BodyPoseResult.py | 271624dd3466f8c4165c0338570dafd695957b1d | [] | no_license | Jose-Pedro/NAO-KINECT-ROS | 89eefd3956a2d739496fb4e7199b7e523f47a2ec | efb01e20983788e62baac26d2aab7949729609b6 | refs/heads/master | 2021-01-19T04:13:42.647533 | 2016-06-22T19:46:45 | 2016-06-22T19:46:45 | 61,549,669 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,943 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from naoqi_bridge_msgs/BodyPoseResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class BodyPoseResult(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "naoqi_bridge_msgs/BodyPoseResult"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# no result currently
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(BodyPoseResult, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
| [
"[email protected]"
] | |
b3ad5ad1dfa77ccb6ae932da72d515588bbbcc8e | 4366912f5607c6c7c813028581b0a56b7f308cd0 | /models/gpt2.py | eace7928a0b813e65797b17d47994764f4999503 | [
"MIT"
] | permissive | gdh756462786/OpenDialog | 269ea74ac2bdef6ba56a199d84920d34dad673d0 | 94445b47c2442b7fa6fee6f666cfea1940196dd3 | refs/heads/master | 2022-12-07T09:43:27.613561 | 2020-08-16T07:05:00 | 2020-08-16T07:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,438 | py | from .header import *
from .test import TestAgent
class GPT2(nn.Module):
def __init__(self, vocab_size, unk_id, sep_id, topk, topp,
repetition_penalty,
config_path='data/config/model_config_dialogue_small.json'):
super(GPT2, self).__init__()
self.model_config = GPT2Config.from_json_file(config_path)
self.model = GPT2LMHeadModel(config=self.model_config)
self.model.resize_token_embeddings(vocab_size)
self.n_ctx = self.model.config.to_dict().get('n_ctx')
self.topk, self.topp = topk, topp
self.unk_id = unk_id
self.sep_id = sep_id
self.repetition_penalty = repetition_penalty
def forward(self, inpt_ids):
# inpt_ids: [batch, seq]
attn_mask = generate_attention_mask(inpt_ids)
outputs = self.model(
input_ids=inpt_ids,
attention_mask=attn_mask)
output = outputs[0] # [batch, seq, vocab]
return output
def predict(self, inpt_ids, max_len):
'''
batch_size is 1
inpt_ids: [seq]
return a list of ids (generated)
no pad, do not need attention_mask
'''
with torch.no_grad():
generated = []
for _ in range(max_len):
outputs = self.model(input_ids=inpt_ids)
next_token_logits = outputs[0][-1, :] # [vocab]
# ignore the [UNK] token
next_token_logits[self.unk_id] = -np.inf
# repetition penalty
if generated:
next_token_logits[list(set(generated))] /= self.repetition_penalty
filtered_logits = top_k_top_p_filtering(
next_token_logits,
top_k=self.topk,
top_p=self.topp)
next_token = torch.multinomial(
F.softmax(filtered_logits, dim=-1),
num_samples=1)
if next_token == self.sep_id:
break
generated.append(next_token.item())
inpt_ids = torch.cat((inpt_ids, next_token), dim=0)
# remember to cut off
inpt_ids = inpt_ids[-self.n_ctx:]
return generated
@torch.no_grad()
def predict_batch(self, inpt_ids, max_len):
'''
inpt_ids: [batch, seq]
return: samples*[batch]
'''
# change inpt_ids from [seq] to [batch, seq]
generated = []
prev, past = inpt_ids, None
batch_size = inpt_ids.shape[0]
stop_flag = np.zeros(batch_size) # [batch]
for _ in range(max_len):
outputs = self.model(input_ids=prev, past=past) # [batch, seq, vocab]
output, past = outputs[:2]
next_token_logits = output[:, -1, :] # [batch, vocab]
next_token_logits[:, self.unk_id] = -np.inf
# repetition penalty
for x in range(batch_size):
y = [item[x] for item in generated]
next_token_logits[x, y] /= self.repetition_penalty
filtered_logits = top_k_top_p_filtering_batch(
next_token_logits,
top_k=self.topk,
top_p=self.topp)
next_token = torch.multinomial(
F.softmax(filtered_logits, dim=-1),
num_samples=1) # [batch, 1]
# set up stop_flag
for idx, i in enumerate(next_token.squeeze(1)):
if i == self.sep_id:
stop_flag[idx] = 1
generated.append([token.item() for token in next_token.squeeze(1)])
prev = next_token
if sum(stop_flag) == batch_size:
break
# transpose
ng, batch_size = [], len(generated[0])
for i in range(batch_size):
ng.append([g[i] for g in generated])
return ng
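# Both decoding loops above delegate vocabulary truncation to
# top_k_top_p_filtering / top_k_top_p_filtering_batch (imported from .header).
# A minimal single-sequence sketch of that idea, assuming a 1-D logits tensor
# (illustrative only -- not this project's implementation):
#
#   def sketch_top_k_top_p(logits, top_k=0, top_p=1.0, filter_value=-float('inf')):
#       if top_k > 0:
#           kth_best = torch.topk(logits, top_k).values[-1]
#           logits[logits < kth_best] = filter_value
#       if top_p < 1.0:
#           sorted_logits, sorted_idx = torch.sort(logits, descending=True)
#           cum_probs = torch.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
#           to_remove = cum_probs > top_p
#           to_remove[1:] = to_remove[:-1].clone()  # always keep the first token past the threshold
#           to_remove[0] = False
#           logits[sorted_idx[to_remove]] = filter_value
#       return logits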
class GPT2Agent(BaseAgent):
def __init__(self, total_steps, multi_gpu, vocab_file='data/vocab/vocab_small', run_mode='train', lang='zh', lm=False):
super(GPT2Agent, self).__init__()
# hyperparameters
try:
# self.gpu_ids = [int(i) for i in multi_gpu.split(',')]
self.gpu_ids = list(range(len(multi_gpu.split(','))))
except:
raise Exception(f'[!] multi gpu ids are needed, but got: {multi_gpu}')
assert run_mode in ['train', 'test', 'rerank', 'rerank_ir'], f'[!] running mode must be train or test, but got {run_mode}'
vocab_file = 'data/vocab/vocab_small' if lang == 'zh' else 'data/vocab/vocab_english'
lr = 1 if lm else 1.5e-4
self.args = {
'lr': lr,
'grad_clip': 1.0,
'pad': 0,
'tgt_len_size': 50,
'lr_gamma': 0.5,
'patience': 5,
'min_lr': 1e-5,
'warmup_steps': 2000,
'total_steps': total_steps,
'topk': 10000,
'topp': 1.0,
'config_path': 'data/config/model_config_dialogue_big.json',
'multi_gpu': self.gpu_ids,
'run_mode': run_mode,
'vocab_file': vocab_file,
'lang': lang,
'topic_transfer': {'音乐': 'music', '体育': 'sport', '数码产品': 'electric', '美食': 'food', '电影': 'movie'},
'balanceddata_parallel_gpu0_size': 2,
'repetition_penalty': 1,
}
# hyperparameters
# self.vocab = BertTokenizer.from_pretrained('/home/lt/data/GPT2_LCCC_base/')
self.vocab = BertTokenizer(vocab_file=self.args['vocab_file'])
self.vocab_size = len(self.vocab)
self.unk = self.vocab.convert_tokens_to_ids('[UNK]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.model = GPT2(
self.vocab_size,
self.unk,
self.sep,
self.args['topk'],
self.args['topp'],
self.args['repetition_penalty'],
config_path=self.args['config_path']
)
self.criterion = nn.CrossEntropyLoss(ignore_index=self.args['pad'], reduction='sum')
self.optimizer = transformers.AdamW(
self.model.parameters(),
lr=self.args['lr'],
correct_bias=True)
# need to obtain the whole iter
self.warmup_scheduler = transformers.get_linear_schedule_with_warmup(
self.optimizer,
num_warmup_steps=self.args['warmup_steps'],
num_training_steps=self.args['total_steps'])
if torch.cuda.is_available():
self.model.cuda()
# train: DataParallel; test: no DataParallel
if self.args['run_mode'] == 'train':
self.model = DataParallel(
self.model,
device_ids=self.gpu_ids)
# self.model = BalancedDataParallel(
# self.args['balanceddata_parallel_gpu0_size'],
# self.model,
# dim=0)
# run_mode == 'chatbot', use the bertretrieval for reranking
if run_mode in ['test', 'rerank', 'rerank_ir']:
'''
from multiview import MultiView
print(f'[!] MultiView reranker model will be initized')
self.reranker = MultiView(
topic=True,
length=False,
nidf_tf=True,
coherence=True,
fluency=False,
repetition_penalty=True,
mmi=True,
distinct=True,
mmi_path='ckpt/train_generative/gpt2_mmi/best.pt',
coherence_path='ckpt/train_retrieval/bertretrieval/best.pt',
topic_path='ckpt/fasttext/model.bin',
fluency_path='ckpt/LM/gpt2lm/best.pt',
)
print(f'[!] load multiview model over')
'''
from .bert_mc import BERTMCAgent
from .bert_retrieval import BERTRetrievalAgent
# self.reranker = BERTRetrievalAgent(multi_gpu, kb=False)
# self.reranker.load_model('ckpt/zh50w/bertretrieval/best.pt')
self.reranker = BERTMCAgent(multi_gpu, kb=False, model_type='mc')
self.reranker.load_model('ckpt/zh50w/bertmc/best.pt')
if run_mode == 'rerank_ir':
self.ir_agent = TestAgent()
self.show_parameters(self.args)
def train_model(self, train_iter, mode='train', recoder=None, idx_=0):
self.model.train()
total_loss, total_acc, batch_num = 0, [], 0
pbar = tqdm(train_iter)
oom_time = 0
try:
for idx, batch in enumerate(pbar):
cid = batch
self.optimizer.zero_grad()
logits = self.model(cid) # [batch, seq, vocab]
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = cid[..., 1:].contiguous()
loss = self.criterion(
shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
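                # shifting pairs position t's logits with token t+1 as target --
                # the standard next-token LM objective; the summed loss is
                # normalized below by the number of non-pad targets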
_, preds = shift_logits.max(dim=-1) # [batch, seq]
# ignore the pad
not_ignore = shift_labels.ne(self.args['pad']) # pad is 0 or 1
num_targets = not_ignore.long().sum().item() # the number of not pad tokens
correct = (shift_labels == preds) & not_ignore
correct = correct.float().sum()
# loss and token accuracy
accuracy = correct / num_targets
total_acc.append(accuracy.item())
loss = loss / num_targets
if mode == 'train':
loss.backward()
clip_grad_norm_(self.model.parameters(), self.args['grad_clip'])
self.optimizer.step()
self.warmup_scheduler.step()
total_loss += loss.item()
batch_num += 1
recoder.add_scalar(f'train-epoch-{idx_}/Loss', total_loss/batch_num, idx)
recoder.add_scalar(f'train-epoch-{idx_}/RunLoss', loss.item(), idx)
recoder.add_scalar(f'train-epoch-{idx_}/RunTokenAcc', accuracy, idx)
recoder.add_scalar(f'train-epoch-{idx_}/TokenAcc', np.mean(total_acc), idx)
pbar.set_description(f'[!] OOM: {oom_time}, train loss: {round(loss.item(), 4)}, token acc: {round(accuracy.item(), 4)}')
except RuntimeError as exception:
if 'out of memory' in str(exception):
oom_time += 1
torch.cuda.empty_cache()
else:
raise exception
return round(total_loss/batch_num, 4)
def test_model_samples(self, test_iter, path, samples=5):
'''
Generate `samples` candidates for one given conversation context
batch_size is 1
'''
def filter(x):
if '[SEP]' in x:
x = x[:x.index('[SEP]')]
return x.replace('[PAD]', '').replace('[SEP]', '').strip()
self.model.eval()
pbar = tqdm(test_iter)
max_size = self.args['tgt_len_size']
with open(path, 'w') as f:
for batch in pbar:
c, r = batch # c: [seq]
c = c.unsqueeze(0) # [1, seq]
c_ = c.expand(samples, c.shape[-1]) # [samples(batch), seq]
tgt = self.model.predict_batch(c_, max_size)
tgt = [self.vocab.convert_ids_to_tokens(i) for i in tgt]
tgt = [filter(' '.join(i)) for i in tgt]
ctx = self.vocab.convert_ids_to_tokens(c[0])
ctx = ' '.join(ctx)
ref = self.vocab.convert_ids_to_tokens(r)
ref = ' '.join(ref)
f.write(f'CTX: {ctx}\n')
f.write(f'REF: {ref}\n')
for idx, i in enumerate(tgt):
f.write(f'TGT{idx}: {i}\n')
f.write('\n')
print(f'[!] translate test dataset over, write into {path}')
@torch.no_grad()
def test_model(self, test_iter, path):
'''
Generate the test dataset and measure the performance
'''
def filter(x):
return x.replace('[PAD]', '')
self.model.eval()
pbar = tqdm(test_iter)
with open(path, 'w') as f:
for batch in pbar:
c, r = batch
max_size = max(len(r), self.args['tgt_len_size'])
tgt = self.model.predict(c, max_size)
text = self.vocab.convert_ids_to_tokens(tgt)
tgt = ''.join(text)
ctx = self.vocab.convert_ids_to_tokens(c)
ctx = filter(''.join(ctx))
ref = self.vocab.convert_ids_to_tokens(r)
ref = filter(''.join(ref))
f.write(f'CTX: {ctx}\n')
f.write(f'REF: {ref}\n')
f.write(f'TGT: {tgt}\n\n')
print(f'[!] translate test dataset over, write into {path}')
# measure the performance
(b1, b2, b3, b4), ((r_max_l, r_min_l, r_avg_l), (c_max_l, c_min_l, c_avg_l)), (dist1, dist2, rdist1, rdist2), (average, extrema, greedy) = cal_generative_metric(path, lang=self.args['lang'])
print(f'[TEST] BLEU: {b1}/{b2}/{b3}/{b4}; Length(max, min, avg): {c_max_l}/{c_min_l}/{c_avg_l}|{r_max_l}/{r_min_l}/{r_avg_l}; Dist: {dist1}/{dist2}|{rdist1}/{rdist2}; Embedding(average/extrema/greedy): {average}/{extrema}/{greedy}')
@torch.no_grad()
def test_model_rerank(self, test_iter, path, beam_size=8):
'''
For reranking the generated samples
Generate the test dataset and measure the performance
Batch size must be 1; default runing batch size (beam search size) is 16
'''
def filter(x):
return x.replace('[PAD]', '')
self.model.eval()
pbar = tqdm(test_iter)
with open(path, 'w') as f:
for batch in pbar:
c, r = batch # [S]
c_ = [deepcopy(c) for _ in range(beam_size)]
c_ = torch.stack(c_) # [B, S]
max_size = max(len(r), self.args['tgt_len_size'])
tgt = self.model.predict_batch(c_, max_size)
# cut from the first [SEP] token
tgt = [i[:i.index(self.sep)+1] if self.sep in i else i for i in tgt]
tgt = to_cuda(pad_sequence([torch.LongTensor(i) for i in tgt], batch_first=True, padding_value=self.pad)) # [B, S]
# rerank procedure
index = self.reranker.predict(c_, tgt)
tgt = tgt[index]
# ids to tokens
text = self.vocab.convert_ids_to_tokens(tgt)
tgt = '[CLS]' + filter(''.join(text))
ctx = self.vocab.convert_ids_to_tokens(c)
ctx = filter(''.join(ctx))
ref = self.vocab.convert_ids_to_tokens(r)
ref = filter(''.join(ref))
f.write(f'CTX: {ctx}\n')
f.write(f'REF: {ref}\n')
f.write(f'TGT: {tgt}\n\n')
f.flush()
print(f'[!] translate test dataset over, write into {path}')
# measure the performance
(b1, b2, b3, b4), ((r_max_l, r_min_l, r_avg_l), (c_max_l, c_min_l, c_avg_l)), (dist1, dist2, rdist1, rdist2), (average, extrema, greedy) = cal_generative_metric(path, lang=self.args['lang'])
print(f'[TEST] BLEU: {b1}/{b2}/{b3}/{b4}; Length(max, min, avg): {c_max_l}/{c_min_l}/{c_avg_l}|{r_max_l}/{r_min_l}/{r_avg_l}; Dist: {dist1}/{dist2}|{rdist1}/{rdist2}; Embedding(average/extrema/greedy): {average}/{extrema}/{greedy}')
@torch.no_grad()
def talk(self, topic, msgs, maxlen=50, batch_size=32):
'''
topic, msgs: msgs is a string which split with the [SEP] token
batch size is 1
n_ctx is 300/512
if the topic of the msgs is very low, append the trigger sentences into the msgs
'''
self.model.eval()
# ========== SMP-MCC use it ==========
# if topic is None:
# self.reranker.mode['topic'] = False
# else:
# # detect the topic of the msgs
# if self.args['run_mode'] in ['rerank', 'rerank_ir']:
# if not self.reranker.topic_scores(msgs, topic):
# trigger_s = random.choice(self.trigger_utterances[topic])
# msgs = f'{trigger_s} [SEP] {msgs}'
# print(f'[!] topic trigger mode is set up: {msgs}')
# ========== SMP-MCC use it ==========
if self.args['run_mode'] == 'test':
msgs = torch.LongTensor(self.vocab.encode(msgs)[-(512-maxlen):])
msgs = to_cuda(msgs)
tgt = self.model.predict(msgs, maxlen)
tgt = self.vocab.convert_ids_to_tokens(tgt)
tgt = ''.join(tgt)
return tgt
elif self.args['run_mode'] in ['rerank', 'rerank_ir']:
# ========== predict_batch ==========
msgs_ = self.vocab.encode(msgs)[-(512-maxlen):]
msgs_ = [deepcopy(msgs_) for _ in range(batch_size)]
msgs_ = torch.LongTensor(msgs_) # [batch, seq]
msgs_ = to_cuda(msgs_)
tgt = self.model.predict_batch(msgs_, maxlen)
tgt = [self.vocab.convert_ids_to_tokens(i) for i in tgt]
# cut from the first [SEP] token
n_tgt = []
for i in tgt:
if '[SEP]' in i:
i = i[:i.index('[SEP]')]
n_tgt.append(''.join(i))
# multiview scores
# rerank_ir also use the fast retrieval model
if self.args['run_mode'] == 'rerank_ir':
retrieval_rest = self.ir_agent.model.search(topic, msgs, samples=batch_size)
retrieval_rest = [i['response'] for i in retrieval_rest]
# remove the utterances that in the self.history
retrieval_rest = list(set(retrieval_rest) - set(self.history))
n_tgt.extend(retrieval_rest)
contexts = [msgs] * len(n_tgt)
if topic:
topic = [self.args['topic_transfer'][topic]] * len(n_tgt)
scores = self.reranker(contexts, n_tgt, topic=topic, history=self.history)[0]
else:
scores = self.reranker(contexts, n_tgt, topic=None)[0]
index = np.argmax(scores)
            if index >= batch_size:
                print(f'[!] reply chosen from the retrieval dialog system; bs/length/index: {batch_size}/{len(n_tgt)}/{index}')
            else:
                print(f'[!] reply chosen from the generative dialog system; bs/length/index: {batch_size}/{len(n_tgt)}/{index}')
response = n_tgt[index]
return response
else:
raise Exception(f'[!] error in gpt2 model `talk` function')
| [
"[email protected]"
] | |
5a75ef259978e19f80ba67378efd742ffb0fb786 | 2d1649a7a00d49b72ed7e53afa4abb3c9281ce03 | /.history/ParticleFilter/go_to_goal_20190421222926.py | 6db3be37efaa0702e3225fa476c85d0605f4a0f5 | [] | no_license | joshzhang5/CS3630Lab6 | 9547dc6c89198e9bb4aebd8359d4feb974082d20 | 69e6df12829e18a211ae850236d74b4d728046ef | refs/heads/master | 2020-05-15T13:59:51.906195 | 2019-04-22T18:21:42 | 2019-04-22T18:21:42 | 182,317,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,093 | py | # Jiaxi Zhang
# George McAlear
try:
import matplotlib
matplotlib.use('TkAgg')
except ImportError:
pass
from skimage import color
import numpy as np
from numpy.linalg import inv
import threading
import time
import sys
import asyncio
from PIL import Image
from markers import detect, annotator
from grid import CozGrid
from gui import GUIWindow
from particle import Particle, Robot
from setting import *
from particle_filter import *
from utils import *
from time import sleep
import time
import cozmo
from cozmo.util import distance_mm, degrees, speed_mmps, Pose
#particle filter functionality
class ParticleFilter:
def __init__(self, grid):
self.particles = Particle.create_random(PARTICLE_COUNT, grid)
self.grid = grid
def update(self, odom, r_marker_list):
# ---------- Motion model update ----------
self.particles = motion_update(self.particles, odom)
# ---------- Sensor (markers) model update ----------
self.particles = measurement_update(self.particles, r_marker_list, self.grid)
# ---------- Show current state ----------
# Try to find current best estimate for display
m_x, m_y, m_h, m_confident = compute_mean_pose(self.particles)
return (m_x, m_y, m_h, m_confident)
class CozmoWarehouseWorker:
def __init__(self, robot: cozmo.robot.Robot):
self.current_arena_pose = None
self.last_robot_pose = robot.pose
self.robot = robot
# start streaming
        robot.set_head_angle(degrees(3))  # __init__ cannot await; start the action and move on
robot.camera.image_stream_enabled = True
robot.camera.color_image_enabled = False
robot.camera.enable_auto_exposure()
# Obtain the camera intrinsics matrix
fx, fy = robot.camera.config.focal_length.x_y
cx, cy = robot.camera.config.center.x_y
self.camera_settings = np.array([
[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]
], dtype=np.float)
        self.pick_up_pose = Pose(x=4.5, y=20, z=0, angle_z=degrees(90))
        self.drop_off_pose = Pose(x=21.75, y=13.75, z=0, angle_z=degrees(90))
        self.drop_off_directions = [Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), self.drop_off_pose]
        self.pick_up_directions = [Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), self.pick_up_pose]
self.drive_speed = speed_mmps(50)
self.grid = CozGrid("map_arena.json")
self.pf = ParticleFilter(self.grid)
self.gui = GUIWindow(self.grid, show_camera=True)
        self.gui.show_particles(self.pf.particles)
self.gui.show_mean(0, 0, 0)
self.gui.start()
    async def drive_to(self, directions):
        print("-" * 10 + "DRIVING" + "-" * 10)
        if isinstance(directions, list):
            for pose in directions:
                await self.__drive_to_pose(pose)
        else:
            await self.__drive_to_pose(directions)
        print("-" * 20)
    async def __drive_to_pose(self, pose):
print("We are at ", self.current_arena_pose, " and we are driving to ", pose)
directions = pose - self.current_arena_pose
print("We will follow these directions: ", directions)
        await self.__execute_directions(directions)
print("Directions followed!")
def update_current_arena_pose(self):
print("-" * 10 + "UPDATING POSE" + "-" * 10)
        coordinate_systems_diff = diff_heading_deg(self.current_robot_pose.rotation.angle_z.degrees, self.current_arena_pose.rotation.angle_z.degrees)
arena_initial_pose_mm = rotate_point(self.current_robot_pose.position.x, self.current_robot_pose.position.y, coordinate_systems_diff)
arena_final_pose_mm = rotate_point(self.robot.pose.position.x, self.robot.pose.position.y, coordinate_systems_diff)
print("We think we moved ", convertPoseFromMmToInches(arena_final_pose_mm - arena_initial_pose_mm))
self.current_arena_pose = self.current_arena_pose + convertPoseFromMmToInches(arena_final_pose_mm - arena_initial_pose_mm)
print("Current pose is now ", self.current_arena_pose)
print("-" * 20)
async def pick_up_cube(self, tries=5):
print("-" * 10 + "GETTING CUBE" + "-" * 10)
cube = await self.robot.world.wait_for_observed_light_cube(timeout=30)
print("Found cube: %s" % cube)
        pickup_action = self.robot.pickup_object(cube, num_retries=tries)
        await pickup_action.wait_for_completed()
        if not pickup_action.has_succeeded:
            print("Could not get the cube.")
            await self.robot.say_text("Help me!").wait_for_completed()
        else:
            print("Picked up cube!")
print("-" * 20)
async def set_down_cube(self):
print("-" * 10 + "SETTING DOWN CUBE" + "-" * 10)
await self.robot.set_lift_height(0.0).wait_for_completed()
await self.robot.set_head_angle(degrees(3)).wait_for_completed()
print("-" * 20)
async def __execute_directions(self, directions):
print("Robot is at: ", self.robot.pose)
await self.robot.turn_in_place(angle=directions.rotation.angle_z).wait_for_completed()
print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose)
        await self.robot.drive_straight(distance=distance_mm(directions.position.x * self.grid.scale), speed=self.drive_speed).wait_for_completed()
print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose)
await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose)
        await self.robot.drive_straight(distance=distance_mm(directions.position.y * self.grid.scale), speed=self.drive_speed).wait_for_completed()
print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose)
async def localize(self, turn_angle=20):
print("-" * 10 + "LOCALIZING" + "-" * 10)
# reset our location estimates
conf = False
self.current_arena_pose = Pose(0,0,0,angle_z=degrees(0))
        self.pf = ParticleFilter(self.grid)
# reset lift and head
await self.robot.set_lift_height(0.0).wait_for_completed()
await self.robot.set_head_angle(degrees(3)).wait_for_completed()
while not conf:
# move a little
            self.last_robot_pose = self.robot.pose  # __compute_odometry diffs against this pose
            self.current_robot_pose = self.robot.pose
await self.robot.turn_in_place(angle=degrees(turn_angle)).wait_for_completed()
odometry = self.__compute_odometry()
detected_markers, camera_image = await self.__marker_processing()
# update, motion, and measurment with the odometry and marker data
            curr_x, curr_y, curr_h, conf = self.pf.update(odometry, detected_markers)
# update gui
self.gui.show_particles(self.pf.particles)
self.gui.show_mean(curr_x, curr_y, curr_h)
self.gui.show_camera_image(camera_image)
self.gui.updated.set()
self.current_arena_pose = Pose(curr_x , curr_y, 0, angle_z=degrees(curr_h))
print("We localized to arena location ", self.current_arena_pose)
print("-" * 20)
def __compute_odometry(self, cvt_inch=True):
'''
Compute the odometry given the current pose of the robot (use robot.pose)
Input:
- curr_pose: a cozmo.robot.Pose representing the robot's current location
- cvt_inch: converts the odometry into grid units
Returns:
- 3-tuple (dx, dy, dh) representing the odometry
'''
last_x, last_y, last_h = self.last_robot_pose.position.x, self.last_robot_pose.position.y, \
self.last_robot_pose.rotation.angle_z.degrees
curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
self.robot.pose.rotation.angle_z.degrees
dx, dy = rotate_point(curr_x-last_x, curr_y-last_y, -last_h)
if cvt_inch:
            dx, dy = dx / self.grid.scale, dy / self.grid.scale
return (dx, dy, diff_heading_deg(curr_h, last_h))
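    # Worked example (assuming grid.scale = 25.4 mm per grid unit): with
    # last_robot_pose at (0 mm, 0 mm, 0 deg) and robot.pose at (25.4 mm, 0 mm, 90 deg),
    # this returns roughly (1.0, 0.0, 90.0) -- one grid unit forward plus a
    # 90-degree left turn.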
async def __marker_processing(self, show_diagnostic_image=False):
'''
Obtain the visible markers from the current frame from Cozmo's camera.
Since this is an async function, it must be called using await, for example:
markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)
Input:
- robot: cozmo.robot.Robot object
- camera_settings: 3x3 matrix representing the camera calibration settings
- show_diagnostic_image: if True, shows what the marker detector sees after processing
Returns:
- a list of detected markers, each being a 3-tuple (rx, ry, rh)
(as expected by the particle filter's measurement update)
- a PIL Image of what Cozmo's camera sees with marker annotations
'''
# Wait for the latest image from Cozmo
image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)
# Convert the image to grayscale
image = np.array(image_event.image)
image = color.rgb2gray(image)
# Detect the markers
markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)
# Measured marker list for the particle filter, scaled by the grid scale
marker_list = [marker['xyh'] for marker in markers]
marker_list = [(x/self.grid.scale, y/self.grid.scale, h) for x,y,h in marker_list]
# Annotate the camera image with the markers
if not show_diagnostic_image:
annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
annotator.annotate_markers(annotated_image, markers, scale=2)
else:
diag_image = color.gray2rgb(diag['filtered_image'])
diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
annotator.annotate_markers(diag_image, markers, scale=2)
annotated_image = diag_image
return marker_list, annotated_image
async def run(robot: cozmo.robot.Robot):
    cosimo = CozmoWarehouseWorker(robot)
await cosimo.localize()
await cosimo.drive_to(cosimo.pick_up_pose)
while True:
await cosimo.pick_up_cube(tries=5)
await cosimo.drive_to(cosimo.drop_off_directions)
await cosimo.set_down_cube()
await cosimo.drive_to(cosimo.pick_up_directions)
class CozmoThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self, daemon=False)
def run(self):
cozmo.robot.Robot.drive_off_charger_on_connect = False # Cozmo can stay on his charger
cozmo.run_program(run, use_viewer=False)
if __name__ == '__main__':
# cozmo thread
cozmo_thread = CozmoThread()
cozmo_thread.start()
# init
| [
"[email protected]"
] | |
a71ff1203b05d22a7207347c13840907b50568bb | 9a4de72aab094c87cfee62380e7f2613545eecfb | /accident/models.py | 83110248a89e95df1135aa7b443aa06f856621b5 | [] | no_license | jamesduan/asset | ed75765c30a5288aaf4f6c56bbf2c9a059105f29 | f71cb623b5ba376309cb728ad5c291ced2ee8bfc | refs/heads/master | 2021-01-10T00:06:41.120678 | 2017-05-27T11:40:48 | 2017-05-27T11:40:48 | 92,730,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,401 | py | # -*- coding: utf-8 -*-
from django.db import models
from cmdb.models import DdDomainV2, DdUsers, DdDepartmentNew
from util.timelib import timelength_format, stamp2str
from monitor.models import EventLevelMap
from cmdb.models import AppWeb
from change.models import Action
class AccidentParentType(models.Model):
_database = 'accident'
id = models.AutoField(primary_key=True,)
name = models.CharField(max_length=50,) #事故类型
define = models.TextField() #类型定义
enable = models.IntegerField(max_length=4, default=0) #类型标记
class Meta:
managed = False
db_table = 'accident_parent_type'
class AccidentType(models.Model):
_database = 'accident'
id = models.AutoField(primary_key=True,)
ptype = models.ForeignKey(AccidentParentType, db_column='ptype_id') #上级类型
name = models.CharField(max_length=50,) #事故类型
enable = models.IntegerField(max_length=4, default=0) #类型标记
class Meta:
managed = False
db_table = 'accident_type'
class AccidentStatus(models.Model):
_database = 'accident'
id = models.AutoField(primary_key=True,)
name = models.CharField(max_length=50,) #事故状态
class Meta:
managed = False
db_table = 'accident_status'
class AccidentOtherDomain(models.Model):
_database = 'accident'
id = models.AutoField(primary_key=True,)
domainname = models.CharField(max_length=150)
deptid = models.IntegerField(max_length=11)
deptname = models.CharField(max_length=150)
class Meta:
managed = False
db_table = 'accident_other_domain'
class Accident(models.Model):
_database = 'accident'
LEVEL_NAME = (
(0, '未定义'),
(1, 'S1'),
(2, 'S2'),
(3, 'S3'),
(4, 'S4')
)
id = models.AutoField(primary_key=True)
accidentid = models.IntegerField(max_length=11, unique=True, blank=True) #事故编号
title = models.CharField(max_length=255) #事故名称
level = models.IntegerField(max_length=4, choices=LEVEL_NAME, default=0, blank=True) #事故等级
find_user_name = models.CharField(max_length=50, blank=True) #事故发现人
duty_manager_name = models.CharField(max_length=50, blank=True) #值班经理
happened_time = models.IntegerField(max_length=12, blank=True) #事故发生时间
finish_time = models.IntegerField(max_length=12,blank=True, default=0) #事故恢复时间
reason = models.TextField(blank=True) #事故原因
is_accident = models.IntegerField(max_length=4, default=0) #是否事故
status = models.ForeignKey(AccidentStatus, db_column='status_id',blank=True, null=True, on_delete=models.SET_NULL) #事故跟进状态,默认事故中
process = models.TextField(blank=True) #事故经过
comment = models.CharField(max_length=255, blank=True) #事故备注
duty_users = models.CharField(max_length=150, blank=True) #责任报告人
type = models.ForeignKey(AccidentType, db_column='type_id', blank=True, null=True, on_delete=models.SET_NULL) #事故根源类型
affect = models.TextField(blank=True) #影响(范围及程度、订单量、销售额)
is_available = models.IntegerField(max_length=4, default=0) #是否影响可用性
root_reason = models.TextField(blank=True) #事故根本原因
is_punish = models.IntegerField(max_length=4, default=0) #是否 处罚,默认不处罚
punish_users = models.CharField(max_length=150,blank=True) #惩罚人(多个)
punish_content = models.TextField(blank=True) #惩罚内容
basicinfo_time = models.IntegerField(max_length=12,blank=True) #基础信息录入时间
detailinfo_time = models.IntegerField(max_length=12,blank=True) #详细信息录入时间
mantis_id = models.IntegerField(max_length=11,default=0) #mantis编号,默认为0,不提交mantis
is_online = models.IntegerField(max_length=4, default=0, blank=True) # 是否电商系统
health = models.FloatField(max_length=10, default=1, blank=True) # 调校系数-业务健康度
@property
def time_length(self): # 事故影响时长
return timelength_format(self.happened_time, self.finish_time)
@property
def duty_manager_name_ch(self):
if self.duty_manager_name != '' and self.duty_manager_name is not None:
try:
user = DdUsers.objects.using('default').get(username=self.duty_manager_name, enable=0)
except DdUsers.DoesNotExist:
user = None
username = user.username_ch if user else ''
else:
username = ''
return username
@property
def find_user_department(self):
if self.find_user_name != '' and self.find_user_name is not None:
try:
find_user = DdUsers.objects.using('default').get(username=self.find_user_name, enable=0)
except DdUsers.DoesNotExist:
find_user = None
find_user_department = find_user.dept_level2.deptname
else:
find_user_department = ''
return find_user_department
@property
def duty_domains(self):
return self.accident_domain.using('accident').all()
@property
def duty_dept_ids(self):
dm_ids = [a_dm.domainid for a_dm in self.duty_domains]
dept_ids = [str(dm.department_level2.id) for dm in DdDomainV2.objects.filter(id__in=dm_ids) if
dm.department_level2]
others = [str(other.deptid) for other in AccidentOtherDomain.objects.filter(id__in=dm_ids)]
return (',').join(list(set(dept_ids)) + others)
@property
def duty_dept_names(self):
dm_ids = [a_dm.domainid for a_dm in self.duty_domains]
duty_depts = [dm.department_level2.deptname for dm in DdDomainV2.objects.filter(id__in=dm_ids) if dm.department_level2 and dm.department_level2.deptname]
others = [other.deptname for other in AccidentOtherDomain.objects.filter(id__in = dm_ids)]
return (',').join(list(set(duty_depts)) + others)
@property
def duty_domain_ids(self):
dm_ids = [str(a_dm.domainid) for a_dm in self.duty_domains]
return (',').join(dm_ids)
@property
def duty_domain_names(self):
dm_ids = [a_dm.domainid for a_dm in self.duty_domains]
duty_dms = [dm.domainname for dm in DdDomainV2.objects.filter(id__in=dm_ids)]
others = [other.domainname for other in AccidentOtherDomain.objects.filter(id__in=dm_ids)]
return (',').join(duty_dms + others)
@property
def logs(self):
return AccidentLog.objects.using('accident').filter(accident_id=self.accidentid).order_by('-create_time')
@property
def logs_happened(self):
return AccidentLog.objects.using('accident').filter(accident_id=self.accidentid).order_by('happened_time')
@property
def action(self):
return self.accident_action.using('accident').all().select_related()
@property
def happened_time_str(self):
return stamp2str(self.happened_time, formt='%Y-%m-%d %H:%M:%S')
@property
def finish_time_str(self):
return stamp2str(self.finish_time, formt='%Y-%m-%d %H:%M:%S')
@property
def basic_sla(self): # 基本信息填写SLA,精确到分钟
return timelength_format(self.finish_time, self.basicinfo_time, unit='m')
@property
def detail_sla(self): # 详细信息填写SLA,精确到分钟
return timelength_format(self.finish_time, self.detailinfo_time, unit='m')
@property
def is_online_str(self):
if self.is_online == 1:
return u'电商系统'
elif self.is_online == 2:
return u'办公系统'
else:
return u'未定义'
class Meta:
managed = False
db_table = 'accident'
class AccidentDomain(models.Model):
_database = 'accident'
id = models.AutoField(primary_key=True)
accident = models.ForeignKey(Accident, related_name='accident_domain', db_column='accident_id', db_index=True, to_field='accidentid', on_delete=models.SET_NULL, null=True) #事故ID
# accident_id = models.IntegerField(max_length=11) # 事故ID
# domain = models.ForeignKey(DdDomainV2, db_column='domainid', blank=True, on_delete=models.SET_NULL, null=True)
domainid = models.IntegerField(max_length=11) # 责任DomainID
departmentid = models.IntegerField(max_length=11) # 责任部门ID
@property
def domainname(self):
try:
domainname = DdDomainV2.objects.using('default').get(id=self.domainid).domainname
except DdDomainV2.DoesNotExist:
try:
domainname = AccidentOtherDomain.objects.get(id=self.domainid).domainname
except AccidentOtherDomain.DoesNotExist:
domainname = ''
return domainname
@property
def deptname(self):
try:
deptname = DdDepartmentNew.objects.using('default').get(id=self.departmentid).deptname
except DdDepartmentNew.DoesNotExist:
try:
deptname = AccidentOtherDomain.objects.get(id=self.departmentid).deptname
except AccidentOtherDomain.DoesNotExist:
deptname = ''
return deptname
class Meta:
managed = False
db_table = 'accident_domain'
class AccidentAction(models.Model):
_database = 'accident'
STATUS_NAME = (
(1, '进行中'),
(2, '延迟'),
(200, '已完成'),
(400, '已取消')
)
id = models.AutoField(primary_key=True)
accident = models.ForeignKey(Accident, related_name='accident_action', db_column='accident_id', to_field='accidentid') #事故ID
# accident_id = models.IntegerField(max_length=11) #事故ID
action = models.CharField(max_length=255) #action内容
duty_users = models.CharField(max_length=150) #action负责人ID
create_time = models.IntegerField(max_length=12, blank=True, default=0) #录入时间
expect_time = models.IntegerField(max_length=12) #预期完成时间
finish_time = models.IntegerField(max_length=12, blank=True, default=0) #实际完成时间
trident_id = models.CharField(max_length=30, blank=True, default='') #tridentID(预期完成时间超过两周的action需关联)
status = models.IntegerField(max_length=4, choices=STATUS_NAME, default=1) #action状态
comment = models.CharField(max_length=255, blank=True)
@property
def dutydept_name(self):
if self.duty_users != '' and self.duty_users is not None:
users = DdUsers.objects.filter(username__in=self.duty_users.split(','), enable=0)
if users.exists():
return (',').join(list(set([user.dept_level2.deptname for user in users if user.dept_level2])))
else:
return ''
else:
return ''
# @property
# def dutydept(self):
# users = DdUsers.objects.filter(username__in = self.duty_users.split(','), enable=0)
# dept_ids = list(set([user.dept_level2.id for user in users]))
# depts = DdDepartmentNew.objects.filter(id__in = dept_ids, enable = 0)
# if depts:
# return depts
# else:
# return []
@property
def expect_time_format(self):
return stamp2str(self.expect_time, formt='%Y-%m-%d')
class Meta:
managed = False
db_table = 'accident_action'
class AccidentPool(models.Model):
_database = 'accident'
id = models.AutoField(primary_key=True)
accident_id = models.IntegerField(max_length=11) # 事故ID
app_id = models.IntegerField(max_length=11) # 应用ID
enable = models.IntegerField(max_length=4, default=0) # 状态 1代表废弃
create_time = models.IntegerField(max_length=12) # 录入时间
@property
def app(self):
try:
app = AppWeb.objects.using('default').get(id=self.app_id)
except AppWeb.DoesNotExist:
app = None
return app
class Meta:
managed = False
db_table = 'accident_pool'
class AccidentLog(models.Model):
_database = 'accident'
SOURCE_NAME = (
(0, '事故中心'),
(1, '配置变更'),
(2, '告警事件'),
)
FROM_ACCIDENT_CHOICE = (
(0, '默认'),
(1, '事故发生'),
(2, '事故恢复'),
)
id = models.AutoField(primary_key=True)
accident_id = models.IntegerField(max_length=11, default=0, blank=True) #事故ID
username = models.CharField(max_length=150) #录入员工
source = models.IntegerField(max_length=4, choices=SOURCE_NAME, default=0) #系统来源
from_id = models.IntegerField(max_length=11, default=0) #配置变更或告警事件表的ID
app_id = models.IntegerField(max_length=4, default=0, blank=True) #受影响应用ID
ip = models.CharField(max_length=255, blank=True) #IP
level_id = models.IntegerField(max_length=4, default=0, blank=True) #log重要性等级
message = models.TextField() #log内容
create_time = models.IntegerField(max_length=12, blank=True, default=0) #log系统写入时间
happened_time = models.IntegerField(max_length=12, blank=True, default=0) #log描述的发生时间
from_accident = models.IntegerField(max_length=4, default=0, choices=FROM_ACCIDENT_CHOICE) #事故发生和恢复时间记录,需同步事故表
is_process = models.IntegerField(max_length=4, default=0, blank=True) #是否同步至事故处理经过,预留
@property
def level_name(self):
if self.source == 0:
level_name = 'L5'
elif self.source == 1:
actions = Action.objects.using('change').filter(level_id=self.level_id)
if actions.count() > 1:
level_name = actions.first().get_level_name
else:
level_name = ''
elif self.source == 2:
try:
level_name = EventLevelMap.objects.using('monitor').get(id=self.level_id).name
except EventLevelMap.DoesNotExist:
level_name = '未定义'
else:
level_name = '未定义'
return level_name
@property
def create_time_format(self):
return stamp2str(self.create_time, formt='%Y-%m-%d %H:%M:%S')
@property
def happened_time_format(self):
return stamp2str(self.happened_time, formt='%Y/%m/%d %H:%M:%S')
@property
def images(self):
return AccidentLogImage.objects.filter(accident_log_id=self.id)
class Meta:
managed = False
ordering = ('-create_time',)
db_table = 'accident_log'
class AccidentLogImage(models.Model):
_database = 'accident'
id = models.AutoField(primary_key=True)
accident_log_id = models.IntegerField(max_length=11, default=None) #事故log信息ID
image = models.ImageField(upload_to='image/accident/%Y/%m/%d', db_column='image_path', max_length=255) #log图片存储路径
create_time = models.IntegerField(max_length=12, blank=True, default=0) # 事故log上传图片时间
class Meta:
managed = False
db_table = 'accident_log_image' | [
"[email protected]"
] | |
e2ea868419ba343d752775edae4710654301638e | cbd6d41e836348e4aabd122e5677e47e270fd417 | /logwatch.py | d76ed7718777cef3481c7e576afa9e6ae190dd2d | [] | no_license | gkasieczka/cms-bot | 750879de82a5f51f781e3d3dacc7e59dd582c681 | 06a07a9c389a13a9694578883a171f597af60998 | refs/heads/master | 2021-01-23T01:46:25.212167 | 2016-08-29T09:47:54 | 2016-08-29T09:47:54 | 66,856,373 | 0 | 0 | null | 2016-08-29T15:34:44 | 2016-08-29T15:34:43 | null | UTF-8 | Python | false | false | 2,116 | py | #!/usr/bin/env python
from os.path import exists, join, basename
from sys import exit
from commands import getstatusoutput
from hashlib import sha256
def run_cmd (cmd, exit_on_error=True):
err, out = getstatusoutput (cmd)
if err and exit_on_error:
print out
exit (1)
return out
class logwatch (object):
def __init__ (self, service, log_dir="/var/log"):
self.log_dir = join(log_dir,"logwatch_" + service)
def process(self, logs, callback, **kwrds):
if not logs: return True, 0
info_file = join(self.log_dir, "info")
if not exists ("%s/logs" % self.log_dir): run_cmd ("mkdir -p %s/logs" % self.log_dir)
prev_lnum, prev_hash, count, data = -1, "", 0, []
if exists(info_file):
prev_hash,ln = run_cmd("head -1 %s" % info_file).strip().split(" ",1)
prev_lnum = int(ln)
if prev_lnum<1: prev_lnum=1
for log in reversed(logs):
service_log = join (self.log_dir, "logs", basename(log))
run_cmd ("rsync -a %s %s" % (log, service_log))
cur_hash = sha256(run_cmd("head -1 %s" % service_log)).hexdigest()
data.insert(0,[log , service_log, 1, cur_hash, False])
if (prev_lnum>0) and (cur_hash == prev_hash):
data[0][2] = prev_lnum
break
data[-1][4] = True
for item in data:
lnum, service_log = item[2], item[1]
get_lines_cmd = "tail -n +%s %s" % (str(lnum), service_log)
if lnum<=1: get_lines_cmd = "cat %s" % service_log
print "Processing %s:%s" % (item[0], str(lnum))
lnum -= 1
for line in run_cmd (get_lines_cmd).split ("\n"):
count += 1
lnum += 1
        try: ok = callback(line, count, **kwrds)
        except Exception: ok = False
        if not ok:
          if (prev_lnum!=lnum) or (prev_hash!=item[3]):
            run_cmd("echo '%s %s' > %s" % (item[3], str(lnum),info_file))
          return False, count
if (prev_lnum!=lnum) or (prev_hash!=item[3]):
prev_lnum=-1
cmd = "echo '%s %s' > %s" % (item[3], str(lnum),info_file)
if not item[4]: cmd = cmd + " && rm -f %s" % service_log
run_cmd(cmd)
return True, count
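
# Minimal usage sketch (not part of cms-bot; the service name, log path and callback
# are assumptions chosen only to illustrate the API above):
#
#   def show(line, count):
#     print line
#     return True          # returning False stops early and checkpoints the position
#
#   ok, processed = logwatch("my-service").process(["/var/log/my-service.log"], show)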

# ===== file: /Leetcode/Two_Sum.py (repo: Pavithra-Rajan/DSA-Practice) =====
from typing import List

class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
        dic = {}  # maps the needed complement (target - nums[i]) to its index i
for i in range(len(nums)):
if nums[i] in dic:
return [dic[nums[i]],i]
else:
dic[target-nums[i]]=i
"""ret=[]
for i in range(0,len(nums)-1):
for j in range(i+1,len(nums)):
if nums[i]+nums[j]==target:
ret.append(i)
ret.append(j)
break
        return ret"""

# ===== file: /xcp2k/classes/_each197.py (repo: superstar54/xcp2k) =====
from xcp2k.inputsection import InputSection
class _each197(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Just_energy': 'JUST_ENERGY', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Xas_scf': 'XAS_SCF', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Rot_opt': 'ROT_OPT', 'Cell_opt': 'CELL_OPT', 'Band': 'BAND', 'Ep_lin_solver': 'EP_LIN_SOLVER', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Replica_eval': 'REPLICA_EVAL', 'Bsse': 'BSSE', 'Shell_opt': 'SHELL_OPT', 'Tddft_scf': 'TDDFT_SCF'}
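
# Usage sketch (an assumption based on the usual xcp2k InputSection pattern; the
# rendering behaviour is not shown in this file):
#   each = _each197()
#   each.Md = 10    # intended to emit "MD 10" inside the generated EACH section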

# ===== file: /vsts/vsts/extension_management/v4_0/models/extension_state.py (repo: kenkuo/azure-devops-python-api, MIT) =====
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .installed_extension_state import InstalledExtensionState
class ExtensionState(InstalledExtensionState):
"""ExtensionState.
:param flags: States of an installed extension
:type flags: object
:param installation_issues: List of installation issues
:type installation_issues: list of :class:`InstalledExtensionStateIssue <extension-management.v4_0.models.InstalledExtensionStateIssue>`
:param last_updated: The time at which this installation was last updated
:type last_updated: datetime
:param extension_name:
:type extension_name: str
:param last_version_check: The time at which the version was last checked
:type last_version_check: datetime
:param publisher_name:
:type publisher_name: str
:param version:
:type version: str
"""
_attribute_map = {
'flags': {'key': 'flags', 'type': 'object'},
'installation_issues': {'key': 'installationIssues', 'type': '[InstalledExtensionStateIssue]'},
'last_updated': {'key': 'lastUpdated', 'type': 'iso-8601'},
'extension_name': {'key': 'extensionName', 'type': 'str'},
'last_version_check': {'key': 'lastVersionCheck', 'type': 'iso-8601'},
'publisher_name': {'key': 'publisherName', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, flags=None, installation_issues=None, last_updated=None, extension_name=None, last_version_check=None, publisher_name=None, version=None):
super(ExtensionState, self).__init__(flags=flags, installation_issues=installation_issues, last_updated=last_updated)
self.extension_name = extension_name
self.last_version_check = last_version_check
self.publisher_name = publisher_name
self.version = version

# ===== file: /pyramid_pony/route_factory.py (repo: knzm/pyramid_pony, MIT) =====
import base64
import zlib
from pyramid.response import Response
from pyramid.decorator import reify
from .pony import PONY, UNICORN, TEMPLATE
class PonyContext(object):
def __init__(self, request):
self.request = request
if request.params.get("horn"):
self.data = UNICORN
self.link = "remove horn!"
self.url = request.path
else:
self.data = PONY
self.link = "add horn!"
self.url = request.path + "?horn=1"
@reify
def home(self):
        return self.request.script_name or "/"
def decode(self, data):
data = base64.b64decode(data)
return zlib.decompress(data).decode('ascii')
def view(request):
context = request.context
data = context.data
html = TEMPLATE.format(
animal=context.decode(data),
url=context.url,
link=context.link,
home=context.home)
return Response(html)
def includeme(config):
config.add_route("pony", "/pony", factory=PonyContext)
config.add_view(view, route_name='pony')
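
# Usage sketch (the surrounding Configurator setup is an assumption for illustration):
#   from pyramid.config import Configurator
#   def main(global_config, **settings):
#       config = Configurator(settings=settings)
#       config.include("pyramid_pony.route_factory")   # runs includeme above
#       return config.make_wsgi_app()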

# ===== file: /misc/largest_k_-k.py (repo: dhumindesai/Problem-Solving) =====
'''
Two-pointer scan over the sorted array. Time: O(n log n) from the sort; extra space: O(1).
'''
def solution(nums):
if len(nums) < 2:
return 0
nums.sort()
first = 0
last = len(nums) - 1
while first < last and nums[first] < 0 and nums[last] > 0:
if abs(nums[first]) == nums[last]:
return nums[last]
if abs(nums[first]) > nums[last]:
first += 1
else:
last -= 1
return 0
'''
Hash-set lookups. Time: O(n); extra space: O(n).
'''
def solution_2(nums):
if len(nums) < 2:
return 0
seen = set()
for num in nums:
seen.add(num)
result = 0
for num in nums:
if num < 0 and abs(num) in seen:
result = max(result, abs(num))
if num > 0 and -num in seen:
result = max(result, num)
return result
print(solution([3, 2, -2, 5, -3]))
print(solution([1, 2, 3, -4]))
print(solution([100, 100, -100, -2, -2, 2, 1, -1]))
print()
print(solution_2([3, 2, -2, 5, -3]))
print(solution_2([1, 2, 3, -4]))
print(solution_2([100, 100, -100, -2, -2, 2, 1, -1]))

# ===== file: /gen_basis_helpers/shared/plane_equations.py (repo: RFogarty1/plato_gen_basis_helpers) =====
import itertools as it
import math
import numpy as np
from . import simple_vector_maths as vectHelp
class ThreeDimPlaneEquation():
""" Class representing a 3-dimension plane equation ax + by +cz = d
"""
def __init__(self, a, b, c, d):
""" Initializer
Args:
a,b,c,d (floats): Parameters for the plane equation ax + by + cz = d
"""
self._eqTol = 1e-6
self.a = a
self.b = b
self.c = c
self.d = d
@classmethod
def fromTwoPositionVectors(cls, inpVectA, inpVectB, normaliseCoeffs=True):
""" Alternative initializer. Creates the object using two input POSITION vectors (i.e. both MUST pass through origin)
Args:
inpVectA: (len 3 float iter) Position vector
inpVectB: (len 3 float iter) Position vector
normaliseCoeffs: (Bool, Optional) If True then always return coefficients for the normalised (i.e. unit) vector normal to the plane. Default=True
"""
vectA, vectB = np.array(inpVectA), np.array(inpVectB)
normVect = np.cross(vectA,vectB)
if normaliseCoeffs:
lenNormVect = math.sqrt( sum([x**2 for x in normVect]) )
normVect = np.array( [x/lenNormVect for x in normVect] )
#D is always zero, since the position vectors are also points on the plane
outCoeffs = [x for x in normVect] + [0]
return cls(*outCoeffs)
def __eq__(self,other):
eqTol = min(self._eqTol, other._eqTol)
for cA,cB in it.zip_longest(self.coeffs, other.coeffs):
if (abs(cA-cB) > eqTol):
return False
return True
def getSignedDistanceOfPointFromPlane(self, inpXyz):
""" Calculates the signed fistance of a point from the plane. If the normal vector points towards the point, the distance is +ve, if it points in the opposite direction it is negative
Args:
inpXyz: (len 3 float iter) [x,y,z] co-ordinates
Returns:
outDist: (float) The signed distance between the input point and the nearest point on this plane
"""
		#Step 1 = Find the d value for the parallel plane this point lies on; the displacement vector between point and plane is then along the vector normal to this plane
#Step 2 = use the fact the normal vector points the same way (or the exact opposite way) as the displacement vector to get a signed distance
dValForThisPoint = self.calcDForInpXyz(inpXyz)
diffInDVals = dValForThisPoint - self.d #Using the absolute value here would give the unsigned distance
lenNormalVectorToThisPlane = math.sqrt( (self.a**2) + (self.b**2) + (self.c**2) )
outDist = diffInDVals / lenNormalVectorToThisPlane
return outDist
def getDistanceOfPointFromPlane(self, inpXyz):
""" Calculates the distance of a point from the plane
Args:
inpXyz: (len 3 float iter) [x,y,z] co-ordinates
Returns
outDist: (float) The distance between input point and the nearest point on this plane
"""
return abs( self.getSignedDistanceOfPointFromPlane(inpXyz) )
def calcDForInpXyz(self, inpXyz):
""" For a given xyz calculate d. If d=self.d then the point lies on this plane; else it lies on a parralel plane with d being the output to this function
Args:
inpXyz: (len 3 float iter) co-ordinates for a point
"""
assert len(inpXyz) == 3
return sum( [param*coord for param,coord in it.zip_longest([self.a,self.b,self.c],inpXyz)] )
def getPointClosestToOrigin(self):
""" Returns the point on this plane closest to the origin """
coeffs = self.coeffs
normVal = 1/( sum([x**2 for x in self.coeffs[:3]]) )
outPoint = [x*coeffs[-1]*normVal for x in self.coeffs[:3]]
#Internal checks; can probably remove later
#Firstly check point is expected distance from plane
		errorTol = 1e-4
expDist = self.getDistanceOfPointFromPlane([0,0,0])
actDist = vectHelp.getLenOneVector(outPoint)
if abs(expDist-actDist)>errorTol:
raise ValueError("Some mistake in my coding here")
#Secondly check point is distance of zero from the plane (i.e. it lies on the plane)
if abs( self.getDistanceOfPointFromPlane(outPoint) ) > errorTol:
raise ValueError("Some mistake in my coding here")
return outPoint
@property
def coeffs(self):
""" Returns a,b,c,d coeffs (in ax + by +cz = d) as a len-4 iter
"""
return [self.a,self.b,self.c,self.d]
@coeffs.setter
def coeffs(self,val):
self.a, self.b, self.c, self.d = val
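
# Illustrative usage (a sketch, not part of the original module; the numbers are
# assumptions chosen to exercise the class above):
#   plane = ThreeDimPlaneEquation.fromTwoPositionVectors([1, 0, 0], [0, 1, 0])
#   plane.coeffs                                          # [0.0, 0.0, 1.0, 0] -> the z = 0 plane
#   plane.getSignedDistanceOfPointFromPlane([0, 0, 2.5])  # 2.5 (normal points towards the point)
#   plane.getDistanceOfPointFromPlane([0, 0, -2.5])       # 2.5 (unsigned)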
def getOutOfPlaneDistTwoPoints(posA, posB, planeEqn):
""" Description of function
Args:
posA: (len-3 iter) [x,y,z]
posB: (len-3 iter) [x,y,z]
planeEqn: (ThreeDimPlaneEquation)
Returns
outDist: The out of plane distance between the two points. This is the distance that would remain if we shifted posA along the surface normal such that it was in the same plane (with planeEqn) as posB
IMPORTANT:
This is obviously NOT aware of periodic boundaries. Likely you want to pass the nearest images in
"""
distFromPlaneA = planeEqn.getSignedDistanceOfPointFromPlane(posA[:3])
distFromPlaneB = planeEqn.getSignedDistanceOfPointFromPlane(posB[:3])
interPlaneDist = abs( distFromPlaneA-distFromPlaneB)
totalDist = vectHelp.getDistTwoVectors(posA[:3], posB[:3])
outOfPlaneDist = math.sqrt( (totalDist**2) - (interPlaneDist**2) )
return outOfPlaneDist
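
# Worked example (a sketch) of the decomposition above, using the z = 0 plane:
# posA = [0, 0, 0], posB = [3, 0, 4] gives totalDist = 5 and interPlaneDist = 4,
# so the out-of-plane distance is sqrt(5**2 - 4**2) = 3.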
def getInterPlaneDistTwoPoints(posA, posB, planeEqn):
""" Gets the inter-plane distance between two points. This is the distance between planeA and planeB, which are both parralel to planeEqn and contain posA and posB respectively
Args:
posA: (len-3 iter) [x,y,z] Position of point A
posB: (len-3 iter) [x,y,z] Position of point B
planeEqn: (ThreeDimPlaneEquation)
Returns
		outDist: (float) As the description says; the distance between parallel planes containing point A and point B (and both parallel to planeEqn)
"""
distFromPlaneA = planeEqn.getSignedDistanceOfPointFromPlane(posA)
distFromPlaneB = planeEqn.getSignedDistanceOfPointFromPlane(posB)
distAB = abs( distFromPlaneA - distFromPlaneB )
return distAB
def getVectorToMoveFromParallelPlanesAToB(planeA, planeB, parallelTol=1e-6):
""" Gets the vector to move (in the shortest distance) from planeA to planeB.
Args:
planeA: (ThreeDimPlaneEquation)
planeB: (ThreeDimPlaneEquation)
		parallelTol: (float) The magnitude of the dot product between the planeA and planeB normal vectors needs to be this close to 1 (the tolerance accounts for float errors)
Returns
outVector: (len-3 iter) Vector allowing movement from planeA to planeB.
NOTE:
This works by getting the distance closest to origin for one plane, and getting the distance of that point from the other plane. This only works if the two planes are parralel and would give an essentially arbitrary value otherwise. Thus, I'm not sure what kind of errors you'd get for near-parralel planes (e.g. within numerical error). Suspect not a massive issue since near-parralel planes are unlikely (never?) going to intersect near origin
Raises:
		ValueError: If the planes intersect. This won't be perfect due to float errors.
"""
#Check for non parallel plane (presumably dot products can be used)
if not checkPlanesAreParralelSimple(planeA, planeB, parallelTol=parallelTol):
raise ValueError("planeEqns with coefficients {} and {} ARE NOT PARRALEL".format( planeA.coeffs[:3], planeB.coeffs[:3] ))
#Get the distance between planes if their parralel
pointOnB = planeB.getPointClosestToOrigin()
signedDist = planeA.getSignedDistanceOfPointFromPlane(pointOnB)
normVector = vectHelp.getUnitVectorFromInpVector(planeA.coeffs[:3])
tVect = [signedDist*x for x in normVector]
return tVect
def checkPlanesAreParralelSimple(planeEqnA, planeEqnB, parallelTol=1e-6):
""" Tries to check whether two planes are parallel by using the dot products of their normal vectors.
Args:
planeEqnA: (ThreeDimPlaneEquation)
planeEqnB: (ThreeDimPlaneEquation)
parallelTol: The magnitude of dot products between planeA and planeB normal vectors needs to be this close to 1 (tolerance is here to account for float errors)
Returns
isParallel: (Bool) Returns True if planes are parallel and False otherwise
"""
normVectA, normVectB = [ vectHelp.getUnitVectorFromInpVector(planeEqnA.coeffs[:3]), vectHelp.getUnitVectorFromInpVector(planeEqnB.coeffs[:3]) ]
absDotProd = abs(vectHelp.getDotProductTwoVectors(normVectA, normVectB))
if abs(absDotProd-1)>parallelTol:
return False
return True
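
# Quick check (a sketch): planes whose normal vectors are proportional count as parallel.
#   checkPlanesAreParralelSimple(ThreeDimPlaneEquation(0, 0, 1, 0),
#                                ThreeDimPlaneEquation(0, 0, -2, 5))   # True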

# ===== file: /MovieProject/MovieProject/wsgi.py (repo: zhumakova/MovieProject, MIT) =====
"""
WSGI config for MovieProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MovieProject.settings')
application = get_wsgi_application()
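
# Example invocation (the server choice is an assumption, not pinned by the project):
#   gunicorn MovieProject.wsgi:application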

# ===== file: /regularexpresion/vhcle.py (repo: Sreerag07/bankproject) =====
# import re
#
# n=input("Enter the number")
# x='[A-Z]{2}[\d]{2}[A-Z]{1}[\d]{4}$'
# match=re.fullmatch(x,n)
# if match is not None:
# print("valid")
# else:
# print("invalid")
import re
n=input("Enter the email address")
x=r'[A-Za-z0-9]+@gmail\.com$'  # one or more alphanumerics followed by a literal "@gmail.com"
match=re.fullmatch(x,n)
if match is not None:
print("valid")
else:
print("invalid") | [
"[email protected]"
] | |
a22ad370ef14f3c0d4a62dbae1dc601050b1b1b5 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v3/proto/errors/media_file_error_pb2.py | d318188d9d1d4fff66dcb9a080883c65c4cf1daa | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | true | 8,029 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/errors/media_file_error.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/errors/media_file_error.proto',
package='google.ads.googleads.v3.errors',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v3.errorsB\023MediaFileErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v3/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V3.Errors\312\002\036Google\\Ads\\GoogleAds\\V3\\Errors\352\002\"Google::Ads::GoogleAds::V3::Errors'),
serialized_pb=_b('\n;google/ads/googleads_v3/proto/errors/media_file_error.proto\x12\x1egoogle.ads.googleads.v3.errors\x1a\x1cgoogle/api/annotations.proto\"\x97\x06\n\x12MediaFileErrorEnum\"\x80\x06\n\x0eMediaFileError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x1f\n\x1b\x43\x41NNOT_CREATE_STANDARD_ICON\x10\x02\x12\x30\n,CANNOT_SELECT_STANDARD_ICON_WITH_OTHER_TYPES\x10\x03\x12)\n%CANNOT_SPECIFY_MEDIA_FILE_ID_AND_DATA\x10\x04\x12\x13\n\x0f\x44UPLICATE_MEDIA\x10\x05\x12\x0f\n\x0b\x45MPTY_FIELD\x10\x06\x12\'\n#RESOURCE_REFERENCED_IN_MULTIPLE_OPS\x10\x07\x12*\n&FIELD_NOT_SUPPORTED_FOR_MEDIA_SUB_TYPE\x10\x08\x12\x19\n\x15INVALID_MEDIA_FILE_ID\x10\t\x12\x1a\n\x16INVALID_MEDIA_SUB_TYPE\x10\n\x12\x1b\n\x17INVALID_MEDIA_FILE_TYPE\x10\x0b\x12\x15\n\x11INVALID_MIME_TYPE\x10\x0c\x12\x18\n\x14INVALID_REFERENCE_ID\x10\r\x12\x17\n\x13INVALID_YOU_TUBE_ID\x10\x0e\x12!\n\x1dMEDIA_FILE_FAILED_TRANSCODING\x10\x0f\x12\x18\n\x14MEDIA_NOT_TRANSCODED\x10\x10\x12-\n)MEDIA_TYPE_DOES_NOT_MATCH_MEDIA_FILE_TYPE\x10\x11\x12\x17\n\x13NO_FIELDS_SPECIFIED\x10\x12\x12\"\n\x1eNULL_REFERENCE_ID_AND_MEDIA_ID\x10\x13\x12\x0c\n\x08TOO_LONG\x10\x14\x12\x14\n\x10UNSUPPORTED_TYPE\x10\x15\x12 \n\x1cYOU_TUBE_SERVICE_UNAVAILABLE\x10\x16\x12,\n(YOU_TUBE_VIDEO_HAS_NON_POSITIVE_DURATION\x10\x17\x12\x1c\n\x18YOU_TUBE_VIDEO_NOT_FOUND\x10\x18\x42\xee\x01\n\"com.google.ads.googleads.v3.errorsB\x13MediaFileErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v3/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V3.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V3\\Errors\xea\x02\"Google::Ads::GoogleAds::V3::Errorsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_MEDIAFILEERRORENUM_MEDIAFILEERROR = _descriptor.EnumDescriptor(
name='MediaFileError',
full_name='google.ads.googleads.v3.errors.MediaFileErrorEnum.MediaFileError',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANNOT_CREATE_STANDARD_ICON', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANNOT_SELECT_STANDARD_ICON_WITH_OTHER_TYPES', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANNOT_SPECIFY_MEDIA_FILE_ID_AND_DATA', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DUPLICATE_MEDIA', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EMPTY_FIELD', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESOURCE_REFERENCED_IN_MULTIPLE_OPS', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FIELD_NOT_SUPPORTED_FOR_MEDIA_SUB_TYPE', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_MEDIA_FILE_ID', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_MEDIA_SUB_TYPE', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_MEDIA_FILE_TYPE', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_MIME_TYPE', index=12, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_REFERENCE_ID', index=13, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_YOU_TUBE_ID', index=14, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEDIA_FILE_FAILED_TRANSCODING', index=15, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEDIA_NOT_TRANSCODED', index=16, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEDIA_TYPE_DOES_NOT_MATCH_MEDIA_FILE_TYPE', index=17, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NO_FIELDS_SPECIFIED', index=18, number=18,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NULL_REFERENCE_ID_AND_MEDIA_ID', index=19, number=19,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TOO_LONG', index=20, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED_TYPE', index=21, number=21,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='YOU_TUBE_SERVICE_UNAVAILABLE', index=22, number=22,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='YOU_TUBE_VIDEO_HAS_NON_POSITIVE_DURATION', index=23, number=23,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='YOU_TUBE_VIDEO_NOT_FOUND', index=24, number=24,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=149,
serialized_end=917,
)
_sym_db.RegisterEnumDescriptor(_MEDIAFILEERRORENUM_MEDIAFILEERROR)
_MEDIAFILEERRORENUM = _descriptor.Descriptor(
name='MediaFileErrorEnum',
full_name='google.ads.googleads.v3.errors.MediaFileErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_MEDIAFILEERRORENUM_MEDIAFILEERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=126,
serialized_end=917,
)
_MEDIAFILEERRORENUM_MEDIAFILEERROR.containing_type = _MEDIAFILEERRORENUM
DESCRIPTOR.message_types_by_name['MediaFileErrorEnum'] = _MEDIAFILEERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MediaFileErrorEnum = _reflection.GeneratedProtocolMessageType('MediaFileErrorEnum', (_message.Message,), dict(
DESCRIPTOR = _MEDIAFILEERRORENUM,
__module__ = 'google.ads.googleads_v3.proto.errors.media_file_error_pb2'
,
__doc__ = """Container for enum describing possible media file errors.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.errors.MediaFileErrorEnum)
))
_sym_db.RegisterMessage(MediaFileErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)

# ===== file: /P.O.R.-master/pirates/battle/SkillInfo.py (repo: BrandonAlex/Pirates-Online-Retribution) =====
6297a0b3ab64c412a408bdaf19c685189ffbb67f | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /P.O.R.-master/pirates/battle/SkillInfo.py | f3c0400bf8136861c1c9483f67974ca73307ee82 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 76,761 | py | skillInfo = {2060: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 2, 15, 0, 0, 0, 0, 1, 0, 3, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2061: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 2, 15, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2062: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 2, 15, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2063: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 2, 120, 0, 0, 0, 0, 1, 0, 0, 0, 0, 21, 0, 0, 1, 0, 0, 1, 10, 2, -1, 0, 1.2, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 8, 1, 1, 0, 0], 2064: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 2, 15, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1.25, 0, 0], 2065: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 2, 120, 0, 0, 0, 0, 1, 0, 0, 0, 0, 5, 0, 0, 1, 0, 0, 1, 10, 2, -1, 0, 1.2, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 8, 1, 1, 0, 0], 2080: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'sail_open_fire', 0, 0, 1, 1.16, 0], 2081: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pistol_take_aim', 0, 0, 1, 0, 0], 2100: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_sparks', 0, 0, 1, 0, 0], 2101: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_sparks', 0, 0, 1, 0, 0], 2102: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_frostbeam', 0, 0, 1, 0, 0], 2103: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_pestilence', 0, 0, 1, 0, 0], 2104: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_fire', 0, 0, 1, 0, 0], 2105: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_deluge', 0, 0, 1, 0, 0], 2106: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 1, u'staff_shockwave', 0, 0, 1, 0, 0], 2120: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'grenade_throw', 0, 2, 1, 2, 0], 2121: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'grenade_strength', 0, 2, 1, 0, 0], 2140: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2141: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2142: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1.5, 0, 0], 2143: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 10, 12, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 0, 1, 0, 0, 1, 4, 2, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 10, 1, 0.5, 0, 0], 2144: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 6, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 2, 0, 0], 2145: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 15, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 0.75, 0, 0], 2160: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2161: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2162: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1.25, 0, 0], 2163: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 25, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 0, 1, 0, 0, 1, 6, 2, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 10, 1, 0.75, 0, 0], 2164: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 18, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 6, 1, 1.5, 0, 0], 12405: [13706, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_blade_instinct', 0, 0, 1, 0, 0], 12406: [13706, 2, 14300, None, 2, 0, 0, 0, -10, 0, 0, 0, 80, 0.25, 15, 50, 0, 0, 0, 0, 1, 0, 0, 1, 1, 7, 0, 0, 1, 0, 1, 1, 6, 0, 25, 0, 0.8, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'dagger_throw', 16, 0, 1, 1.83, 0], 12407: [13706, 2, 14301, None, 4, 0, 0, 0, -20, 0, 0, 0, 80, 0.25, 15, 50, 0, 0, 0, 0, 1, 0, 0, 0, 0, 5, 0, 0, 1, 0, 1, 1, 20, 2, 25, 0, 0.8, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'dagger_adder', 8, 0, 1, 1.83, 0], 12408: [13706, 2, None, None, 6, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 25, 12, 0, 0, 0, 0, 1, 0, 0, 1, 1, 19, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 
0.8, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'dagger_throw_dirt', 18, 1, 1, 1.58, 0], 12409: [13706, 2, 14302, None, 8, 0, 0, 0, -60, 0, 0, 0, 80, 0.25, 20, 50, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 0, 1, 0, 1, 1, 6, 2, 25, 0, 0.8, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_sidewinder', 10, 0, 1, 1, 0], 12410: [13706, 2, 14303, None, 10, 0, 0, 0, -120, 0, 0, 0, 80, 0.25, 45, 50, 0, 0, 0, 0, 1, 0, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 25, 0, 0.8, 3, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_vipers_nest', 30, 2, 1, 0.833, 0], 2180: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2181: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2182: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 20, 10, 10, 0, 0, 0, 1, 1, 1, 0, 0, 14, 0, 0, 1, 0, 0, 1, 4, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 0.8, 0, 0], 2183: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 15, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 4, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1.5, 0, 0], 2200: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2201: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2202: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 25, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 5, 0, 0, 1, 0, 0, 1, 12, 2, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 0.25, 0, 0], 2203: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 25, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 14, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1.25, 0, 0], 2220: [13712, 5, None, None, 1, 1, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_shoot', 6, 1, 1, 0, 0], 2221: [13712, 5, None, None, 1, 1, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_shoot', 6, 1, 1, 0, 0], 2238: [13703, 1, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 20, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 159, 0, 0, 0, 0, 0, 0, 12, 0, -1, 0, 0.6, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_parry', 45, 0, 1, 1.42, 0], 2239: [13703, 1, None, None, 2, 0, 0, 0, -200, 0, 0, 0, 100, 0.25, 15, 6, 0, 0, 0, 0, 1, 0, 0, 1, 1, 156, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 0.5, 2, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, u'dagger_cut', 42, 1, 1.5, 1, 0], 2240: [13703, 1, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 80, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 3, 1, 1, 1, 0], 2241: [13703, 1, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 100, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1.2, 1, 0], 2242: [13703, 1, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 95, 0.25, 2, 10, 
0, 0, 0, 0, 1, 0, 3, 0, 0, 7, 0, 0, 1, 0, 0, 1, 5, 0, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 29, 1, 1.2, 1, 0], 2243: [13703, 1, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 100, 0.25, 2, 10, 0, 0, 0, 0, 1, 0, 3, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 29, 1, 0.75, 1, 0], 2244: [13703, 1, None, None, 1, 0, 0, 0, -100, 0, 0, 0, 80, 0.25, 10, 40, 0, 0, 0, 0, 1, 0, 2, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_dodge', 3, 1, 2, 1.5, 0], 2245: [13703, 1, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 100, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1, 1, 0], 2246: [13703, 1, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 50, 0.25, 10, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 0, 1, 0, 0, 1, 5, 2, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1.5, 1, 0], 2247: [13703, 1, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 100, 0.25, 6, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 4, 0, -1, 0, 0.22, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 2, 1, 0], 2248: [13703, 1, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 100, 0.25, 10, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 14, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 0.75, 1, 0], 2250: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 2, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1, 1, 0], 2251: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 2, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.22, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 0.75, 1, 0], 2252: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 2, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.44, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1, 1, 0], 2253: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 6, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 1, 10, 2, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1, 1, 0], 2254: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 75, 0.25, 8, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 1, 10, 2, -1, 0, 0.22, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 0.75, 1, 0], 2256: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 10, 7, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.8, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 0.75, 1, 0], 2257: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 12, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.22, 4, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 2, 1, 0], 12500: [13707, 4, None, None, 2, 1, 0, 0, -20, 0, 0, 0, 100, 0.25, 2, 200, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'grenade_grenadier', 0, 2, 1, 2, 0], 12501: [13707, 4, None, None, 1.5, 1, 0, 0, -90, 0, 0, 0, 100, 0.25, 1, 50, 10, 1, 0, 0, 1, 40, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'grenade_throw', 0, 2, 1, 0, 0], 12502: [13707, 4, 14101, None, 1.5, 1, 0, 0, -50, 0, 0, 0, 100, 0.25, 1, 50, 20, 1, 0, 0, 1, 40, 1, 0, 0, 7, 0, 0, 0, 0, 0, 1, 10, 0, 25, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 
u'grenade_flameburst', 0, 2, 1, 0, 0], 12503: [13707, 4, 14102, None, 2, 1, 0, 0, -50, 0, 0, 0, 100, 0.25, 1, 50, 10, 1, 0, 0, 1, 40, 1, 0, 0, 3, 0, 0, 0, 0, 0, 1, 20, 2, 25, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_explosion', 9, 2, 1, 0, 0], 12504: [13707, 4, 14103, None, 2, 1, 0, 0, 0, 0, 0, 0, 100, 0.25, 1, 50, 14, 1, 0, 0, 1, 40, 1, 0, 0, 9, 0, 0, 0, 0, 0, 1, 10, 0, 15, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_smokecloud', 0, 2, 1, 0, 0], 12505: [13707, 4, 14105, None, 2, 1, 0, 0, -150, 0, 0, 0, 100, 0.25, 1, 10, 20, 1, 0, 0, 1, 40, 1, 0, 0, 7, 0, 0, 0, 0, 0, 1, 3, 0, 10, 0, 0, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_siege', 0, 2, 1, 0, 0], 12506: [13707, 4, None, None, 2, 1, 0, 0, -40, 0, 0, 0, 100, 0.25, 2, 400, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 2, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'grenade_strength', 0, 2, 1, 1.875, 0], 12507: [13707, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'grenade_determination2', 0, 0, 1, 0, 0], 12508: [13707, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_demolition', 0, 0, 1, 0, 0], 12509: [13707, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_determination', 0, 0, 1, 0, 0], 12510: [13707, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cannon_toughness', 0, 0, 1, 0, 0], 2271: [13706, 2, None, 12400, 4, 0, 0, 0, -9, 0, 0, 0, 80, 0.25, 0.5, 50, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 1, 1, 3, 0, -1, 0, 0.8, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_throw', 16, 0, 1, 1, 0], 2272: [13706, 2, None, 12401, 4, 0, 0, 0, -20, 0, 0, 0, 80, 0.25, 0.5, 50, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 1, 1, 3, 0, -1, 0, 0.8, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_sidewinder', 16, 0, 1, 1, 0], 2273: [13706, 2, None, 12402, 4, 0, 0, 0, -33, 0, 0, 0, 80, 0.25, 0.5, 50, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 1, 1, 3, 0, -1, 0, 0.8, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_impale', 16, 0, 1, 1, 0], 2274: [13706, 2, None, 12403, 4, 0, 0, 0, -51, 0, 0, 0, 80, 0.25, 0.5, 50, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 1, 1, 3, 0, -1, 0, 0.8, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_vipers_nest', 16, 0, 1, 1, 0], 2275: [13706, 2, None, None, 4, 0, 0, 0, -50, 0, 0, 0, 80, 0.25, 60, 30, 30, 0, 0, 0, 1, 0, 1, 0, 0, 164, 0, 0, 0, 0, 1, 1, 3, 0, -1, 0, 0.8, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pir_t_gui_amo_can_iceshot', 13, 2, 1, 1, 1], 2280: [13706, 2, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 5, 100, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 0.8, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_throw', 16, 0, 1, 1.83, 0], 2281: [13706, 2, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 15, 100, 0, 0, 0, 0, 1, 0, 0, 0, 0, 5, 0, 0, 1, 0, 1, 1, 10, 2, -1, 0, 0.8, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_adder', 8, 0, 0.75, 1.83, 0], 2282: [13706, 2, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 30, 100, 0, 0, 0, 0, 1, 0, 0, 0, 0, 14, 0, 0, 1, 0, 1, 1, 4, 0, -1, 0, 0.8, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_sidewinder', 10, 0, 0.75, 1, 0], 2283: [13706, 2, None, None, 1, 0, 0, 0, 0, 0, 0, 
0, 80, 0.25, 30, 100, 0, 0, 0, 0, 1, 0, 0, 1, 1, 26, 0, 0, 1, 0, 1, 1, 3, 0, -1, 0, 0.8, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_throw', 16, 0, 1, 1, 0], 2284: [13706, 1, None, None, 0, 0, 0, 0, -150, 0, 0, 0, 100, 0.25, 15, 12, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_swipe', 6, 1, 1, 1, 0], 2285: [13706, 1, None, None, 4, 0, 0, 0, -100, 0, 0, 0, 100, 0.25, 30, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 1, 1, 6, 0, -1, 0, 0.8, 3, 0, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_flourish', 14, 0, 1, 1, 0], 2286: [13706, 2, None, None, 2, 0, 0, 0, -80, 0, 0, 0, 80, 0.25, 20, 50, 0, 0, 0, 0, 1, 0, 0, 1, 1, 7, 0, 0, 1, 0, 1, 1, 8, 0, -1, 0, 0.8, 3, 0, 5, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'dagger_blade_instinct', 16, 0, 1, 1.83, 0], 2287: [13706, 2, None, None, 4, 0, 0, 0, -70, 0, 0, 0, 80, 0.25, 20, 30, 30, 0, 0, 0, 1, 0, 1, 0, 0, 26, 0, 0, 0, 0, 1, 1, 3, 0, -1, 0, 0.8, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_sweep', 13, 0, 1, 1, 1], 2288: [13706, 1, None, None, 4, 0, 0, -50, -60, 0, 0, 0, 100, 0.25, 15, 10, 0, 0, 0, 0, 1, 0, 1, 0, 0, 5, 0, 0, 1, 0, 1, 1, 15, 2, -1, 0, 0.8, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_gouge', 8, 0, 1, 1, 0], 2289: [13706, 2, None, None, 8, 0, 0, -35, -40, 0, 0, 0, 80, 0.25, 20, 50, 0, 0, 0, 0, 1, 0, 0, 0, 0, 21, 0, 0, 1, 0, 1, 1, 10, 2, -1, 0, 0.8, 1, 0, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_impale', 8, 0, 1, 1, 0], 2290: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.45, 6, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1, 1, 0], 2291: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 15, 6, 15, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.29, 4, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 29, 1, 2, 1, 0], 2292: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 12, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.62, 5, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1.25, 1, 0], 2293: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 1, 10, 2, -1, 0, 0.5, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1, 1, 0], 2294: [13703, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0.4, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 1, 1, 1, 1, 0], 2300: [13709, 3, None, None, 6, 0, 0, -60, 300, 0, 0, 0, 100, 0.25, 45, 130, 0, 0, 0, 1, 1, 0, 0, 0, 0, 166, 0, 0, 1, 1, 0, 0, 0, 0, -1, 0, 0.1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, u'voodoo_will_power', 22, 5, 1, 2.625, 0], 2301: [13709, 3, None, None, 6, 0, 0, -50, 0, 0, 0, 0, 100, 0.25, 60, 130, 0, 0, 0, 1, 1, 0, 0, 0, 0, 35, 0, 0, 1, 1, 0, 0, 15, 0, -1, 0, 0.1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, u'voodoo_hasten', 47, 5, 1, 2.625, 0], 2302: [13709, 3, None, None, 6, 0, 0, -50, 0, 0, 0, 0, 100, 0.25, 45, 130, 0, 0, 0, 1, 1, 0, 0, 0, 0, 34, 0, 0, 1, 1, 0, 0, 10, 0, -1, 0, 0.1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, u'cutlass_bladestorm', 49, 5, 1, 2.625, 0], 2303: [13709, 3, None, None, 6, 0, 0, -50, 0, 0, 0, 0, 100, 0.25, 60, 130, 0, 0, 0, 1, 1, 0, 0, 0, 0, 37, 0, 0, 1, 1, 0, 0, 15, 0, -1, 0, 0.1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, u'grenade_determination', 46, 5, 1, 2.625, 0], 2304: [13709, 3, None, None, 6, 0, 0, -50, 0, 0, 0, 0, 100, 0.25, 60, 130, 0, 0, 0, 1, 1, 0, 0, 0, 0, 36, 0, 0, 1, 1, 0, 0, 
15, 0, -1, 0, 0.1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, u'staff_spirit_lore', 48, 5, 1, 2.625, 0], 2305: [13709, 3, None, None, 6, 0, 0, -40, 0, 0, 0, 0, 100, 0.25, 45, 130, 0, 0, 0, 1, 1, 0, 0, 0, 0, 20, 0, 0, 1, 1, 0, 0, 20, 2, -1, 0, 0.1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, u'staff_harmony', 25, 5, 1, 2.625, 0], 2306: [13709, 3, None, None, 0, 0, 0, -35, 0, 0, 0, 0, 100, 1, 10, 30, 30, 0, 0, 1, 1, 0, 2, 0, 0, 18, 0, 0, 0, 0, 0, 0, -1, 0, -1, 0, 0.1, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'grenade_determination2', 19, 2, 1, 0.78, 0], 2310: [13704, 1, None, None, 1, 0, 0, 0, -40, 0, 0, 0, 90, 0, 0.5, 10, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_cleave', 2, 1, 1, 1, 0], 2311: [13704, 1, None, None, 2, 0, 0, 0, -15, 0, 0, 0, 90, 0, 15, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 0.1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_brawl', 6, 1, 0.75, 0, 0], 2312: [13704, 1, None, None, 2, 0, 0, 0, -60, 0, 0, 0, 70, 0, 10, 10, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 4, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_vipers_nest', 2, 1, 1.5, 0, 0], 2313: [13704, 2, None, None, 1, 0, 0, 0, -35, 0, 0, 0, 80, 0, 3, 150, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 0.1, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pistol_shoot', 0, 0, 0.6, 1, 0], 2314: [13704, 1, None, None, 2, 0, 0, 0, -15, 0, 0, 0, 90, 0, 6, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 7, 0, 0, 1, 0, 0, 1, 8, 0, -1, 0, 0.1, 1, 2, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_brawl', 6, 1, 0.75, 0, 0], 2315: [13704, 1, None, None, 2, 0, 0, 0, -60, 0, 0, 0, 70, 0, 6, 10, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 4, 2, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_vipers_nest', 2, 1, 1.5, 0, 0], 2316: [13704, 1, None, None, 2, 0, 0, 0, -25, 0, 0, 0, 90, 0, 6, 6, 0, 0, 0, 0, 1, 0, 0, 1, 1, 156, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 0.1, 1, 2, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_toughness', 42, 1, 0.75, 0, 0], 2317: [13704, 1, None, None, 1, 0, 0, 0, -40, 0, 0, 0, 90, 0, 0.5, 10, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.33, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_cleave', 2, 1, 0.5, 1, 0], 2320: [13703, 1, None, None, 1, 0, 0, 0, -60, 0, 0, 0, 80, 0.25, 4, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 0.22, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_cleave', 1, 1, 1, 1, 0], 31000: [13716, 1, None, None, 3, 0, 0, 0, -1, 0, 0, 0, 100, 0.25, 15, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_fsh_stall', 1, 1, 0.75, 0.625, 0], 31001: [13716, 1, None, None, 4, 0, 0, 0, -1, 0, 0, 0, 100, 0.25, 15, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.46, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_fsh_pull', 2, 1, 1, 0.75, 0], 31002: [13716, 1, None, None, 6, 0, 0, 0, -1, 0, 0, 0, 100, 0.25, 30, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.7, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_fsh_heal', 5, 1, 1.25, 1.125, 0], 31003: [13716, 1, None, None, 13, 0, 0, 0, -1, 0, 0, 0, 100, 0.25, 30, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pir_t_gui_fsh_tug', 4, 1, 1.5, 1.58, 0], 31004: [13716, 1, None, None, 16, 0, 0, 0, -1, 0, 0, 0, 100, 0.25, 10, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
u'pir_t_gui_fsh_sink', 4, 1, 1.5, 1.58, 0], 31005: [13716, 1, None, None, 17, 0, 0, 0, -1, 0, 0, 0, 100, 0.25, 20, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pir_t_gui_fsh_oceanEye', 4, 1, 1.5, 1.58, 0], 2334: [13703, 1, None, None, 1, 0, 0, -60, -50, 0, 0, 0, 100, 0.25, 30, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 164, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_amo_can_iceshot', 31, 1, 0.75, 0.625, 0], 2335: [13703, 1, None, None, 1, 0, 0, -80, -100, 0, 0, 0, 100, 0.25, 15, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_thunderbolt', 44, 1, 0.75, 0.625, 0], 2336: [13703, 1, None, None, 2, 0, 0, 0, -80, 0, 0, 0, 100, 0.25, 30, 6, 15, 0, 0, 0, 1, 0, 1, 1, 1, 156, 0, 0, 1, 0, 0, 0, 6, 0, -1, 0, 0.6, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_shrapnel', 42, 1, 1.25, 1.42, 0], 2337: [13703, 1, None, None, 2, 0, 0, 0, -100, 0, 0, 0, 100, 0.25, 30, 6, 15, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.6, 4, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'grenade_determination', 29, 1, 1.25, 1.42, 0], 2340: [13704, 2, None, 12200, 1, 0, 0, 0, -30, 0, 0, 0, 70, 0.25, 0.5, 70, 70, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'sail_task_master', 0, 0, 1, 0.55, 0], 2341: [13704, 2, None, 12200, 2, 0, 0, 0, -50, 0, 0, 0, 100, 0.25, 15, 100, 0, 0, 0, 0, 1, 0, 0, 0, 0, 7, 0, 0, 1, 0, 1, 1, 5, 0, -1, 0, 0.1, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pistol_eagle_eye', 6, 0, 1, 0.55, 0], 2342: [13704, 2, None, None, 2, 0, 0, 0, -25, 0, 0, 0, 90, 0.25, 15, 70, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 0.1, 3, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_barrage', 0, 0, 1, 0.55, 0], 2343: [13704, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 60, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 157, 0, 0, 0, 0, 0, 0, 10, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'sail_open_fire', 45, 0, 1, 1.16, 0], 2344: [13704, 2, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 70, 0.25, 10, 40, 70, 0, 0, 0, 1, 0, 2, 1, 0, 6, 0, 0, 1, 0, 0, 1, 5, 0, -1, 0, 0.1, 2, 2, 5, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_amo_can_scatter', 42, 0, 1, 0.55, 0], 2345: [13704, 2, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 70, 0.25, 10, 40, 70, 0, 0, 0, 1, 0, 2, 0, 0, 4, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 3, 2, 5, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_grape_shot', 10, 0, 1, 0.55, 0], 2346: [13704, 2, None, None, 2, 0, 0, 0, -200, 0, 0, 0, 80, 0.25, 10, 20, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 0.1, 1, 0, 5, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pistol_shoot', 6, 0, 1, 0.55, 0], 2347: [13704, 2, None, None, 2, 0, 0, 0, -50, 0, 0, 0, 100, 0.25, 15, 100, 0, 0, 0, 0, 1, 0, 0, 0, 0, 3, 0, 0, 1, 0, 1, 1, 10, 0, -1, 0, 0.1, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_amo_can_firestorm', 9, 0, 1, 0.55, 0], 2348: [13704, 2, None, 12210, 5, 0, 0, 0, -30, 0, 0, 0, 70, 0.25, 1, 70, 70, 0, 0, 0, 1, 0, 2, 0, 0, 7, 0, 0, 1, 0, 1, 1, 3, 0, -1, 2, 0.1, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pistol_take_aim', 0, 0, 1, 0.55, 0], 2350: [13703, 1, None, 12100, 1, 0, 0, 0, -12, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_hack', 50, 1, 0.5, 0.375, 0], 2351: [13703, 1, None, 12101, 2, 0, 0, 0, -33, 0, 0, 0, 100, 0.25, 0.5, 6, 10, 0, 0, 0, 1, 0, 1, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.46, 1, 0, 1, 
1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_slash', 51, 1, 1.025, 0.775, 0], 2352: [13703, 1, None, 12102, 2, 0, 0, 0, -45, 0, 0, 0, 100, 0.25, 0.5, 6, 15, 0, 0, 0, 1, 0, 1, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.7, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_cleave', 52, 1, 0.875, 0.75, 0], 2353: [13703, 1, None, 12103, 4, 0, 0, 0, -80, 0, 0, 0, 100, 0.25, 0.5, 6, 15, 0, 0, 0, 1, 0, 1, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_flourish', 53, 1, 0.6, 0.6, 0], 2354: [13703, 1, None, 12104, 6, 0, 0, 0, -110, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 54, 1, 1.4, 0.95, 0], 12600: [13709, 3, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 1, 0.5, 6, 0, 0, 0, 1, 1, 0, 0, 0, 0, 18, 0, 0, 1, 0, 0, 0, -1, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'voodoo_attune', 19, 2, 1, 0.78, 0], 12601: [13709, 3, None, None, 4, 0, 0, -5, -65, 0, 0, 0, 100, 0.25, 4, 130, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.6, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'voodoo_poke', 20, 2, 0.5, 2.08, 0], 12602: [13709, 3, None, None, 5, 0, 0, -20, -100, 0, 0, 0, 100, 0.25, 10, 130, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.4, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'voodoo_swarm', 21, 2, 2, 2.625, 0], 12603: [13709, 3, None, None, 10, 0, 0, -15, 180, 0, 0, 0, 100, 0.25, 10, 130, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.3, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'voodoo_heal', 22, 2, 1, 2.625, 0], 2364: [13703, 1, None, 12104, 6, 0, 0, 0, -83, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 59, 1, 1.4, 0.7, 0], 12605: [13709, 3, None, None, 7.5, 0, 0, -35, -25, 0, 0, 0, 100, 0.25, 25, 130, 0, 0, 0, 0, 1, 0, 0, 0, 0, 3, 0, 0, 1, 0, 0, 1, 15, 2, -1, 0, 0.3, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, u'voodoo_burn', 9, 2, 1, 2.625, 0], 12606: [13709, 3, None, None, 20, 0, 0, -40, 360, 0, 0, 0, 100, 0.25, 30, 130, 0, 0, 0, 1, 1, 0, 0, 0, 0, 15, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.3, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_cure', 25, 2, 1, 2.625, 0], 12607: [13709, 3, None, None, 20, 0, 0, -50, 0, 0, 0, 0, 100, 0.25, 60, 130, 0, 0, 0, 0, 1, 0, 0, 0, 0, 8, 0, 0, 1, 0, 0, 1, 5, 0, -1, 0, 0.1, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_shackle', 24, 2, 1, 2.625, 0], 12608: [13709, 3, None, None, 20, 0, 0, -100, -200, 0, 0, 0, 100, 0.25, 30, 130, 0, 0, 0, 0, 1, 0, 0, 0, 0, 23, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_life_drain', 27, 2, 2, 2.625, 0], 12609: [13709, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0.1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_focus', 0, 0, 1, 0, 0], 12610: [13709, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_will_power', 0, 0, 1, 0, 0], 2371: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 0.5, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_heal', 45, 2, 1, 2.625, 0], 2372: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 0.5, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_cure', 45, 2, 
1, 2.625, 0], 2373: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 0.5, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_curse', 45, 2, 1, 2.625, 0], 12650: [13712, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.05, 15, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'sail_broadside_left', 0, 1, 1, 0, 0], 12651: [13712, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.05, 15, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'sail_broadside_right', 0, 1, 1, 0, 0], 12652: [13712, 0, None, None, 2, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 90, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 10, 0, -1, 0, 0, 1, 0, 2, 1, 4, 4, 0, 0, 0, 0, 0, 1, 1, u'sail_full_sail', 0, 1, 1, 0, 0], 12653: [13712, 0, None, None, 2, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 90, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 101, 0, 0, 0, 0, 0, 0, 5, 0, -1, 0, 0, 1, 0, 2, 1, 0.25, 0.25, 0, 0, 4, 4, 0, 1, 1, u'sail_come_about', 0, 1, 1, 0, 0], 12654: [13712, 0, None, None, 4, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 60, -1, 150, 1, 0, 1, 0, 0, 1, 0, 0, 102, 0, 0, 0, 0, 0, 0, 6, 0, -1, 0, 0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, u'sail_openfire2', 0, 1, 1, 0, 0], 12655: [13712, 0, None, None, 20, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 90, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 103, 0, 0, 0, 0, 0, 0, 5, 0, -1, 0, 0, 1, 0, 2, 1, 6, 6, 0, 0, 0, 0, 0, 0, 0, u'sail_ramming_speed', 0, 1, 1, 0, 0], 12656: [13712, 0, None, None, 4, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 60, -1, 150, 1, 0, 1, 0, 0, 1, 0, 0, 104, 0, 0, 0, 0, 0, 0, 10, 0, -1, 0, 0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, u'sail_take_cover', 0, 1, 1, 0, 0], 12657: [13712, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0.04, 0.04, 0.04, 0.04, 0, 0, 0, 1, 1, u'sail_wind_catcher', 0, 1, 1, 0, 0], 12658: [13712, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0.04, 0.04, 0, 0, 0, u'sail_tacking', 0, 1, 1, 0, 0], 12659: [13712, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'sail_treasure_sense', 0, 1, 1, 0, 0], 12660: [13712, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'sail_task_master', 0, 1, 1, 0, 0], 12661: [13712, 0, None, None, 4, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 120, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 105, 0, 0, 0, 0, 0, 0, 10, 0, -1, 0, 0, 1, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, u'sail_recharge', 0, 1, 1, 0, 0], 12700: [13708, 3, None, None, 0.5, 0, 0, 0, -10, 0, 0, 0, 90, 0.25, 0.8, 70, 3, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.7, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_blast', 31, 0, 1, 1.33, 0], 12701: [13708, 3, None, None, 2.5, 0, 0, -30, -50, 0, 0, 0, 100, 0.25, 0.5, 50, 0, 0, 0, 0, 1, 0, 3, 0, 0, 7, 0, 0, 0, 0, 0, 1, 5, 0, -1, 1, 0.1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_frostbeam', 36, 2, 1, 1.33, 0], 12702: [13708, 3, None, None, 5, 0, 0, -45, -80, 0, 0, 0, 100, 0.25, 2.5, 50, 0, 0, 0, 0, 1, 0, 2, 0, 0, 21, 0, 0, 0, 0, 0, 1, 20, 2, -1, 3, 0.1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_pestilence', 33, 2, 1, 1.33, 0], 12703: [13708, 3, None, None, 4, 0, 0, -60, -100, 0, 0, 0, 100, 0.25, 1.5, 20, 
20, 0, 0, 0, 1, 0, 1, 0, 0, 14, 0, 0, 0, 0, 0, 1, 15, 0, -1, 2, 0.1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_sparks', 32, 2, 1, 1.33, 0], 12704: [13708, 3, None, None, 8.5, 0, 0, -120, -200, 0, 0, 0, 100, 0.25, 3.5, 120, 0, 0, 0, 0, 1, 0, 3, 0, 0, 3, 0, 0, 0, 0, 0, 1, 6, 2, -1, 4, 0.1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_fire', 37, 2, 1, 1.33, 0], 12705: [13708, 3, None, None, 18, 0, 0, -210, -300, 0, 0, 0, 100, 0.25, 7.5, 50, 0, 0, 0, 0, 1, 0, 0, 0, 0, 16, 0, 0, 1, 0, 1, 1, 0, 0, -1, 8, 0.1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_deluge', 34, 2, 1, 1.33, 0], 12706: [13708, 3, None, None, 15, 0, 0, -300, -350, 0, 0, 0, 100, 0.25, 11.5, 20, 40, 0, 0, 0, 1, 0, 1, 0, 0, 22, 0, 0, 0, 0, 0, 1, 0, 0, -1, 12, 0.1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_shockwave', 35, 2, 1, 1.33, 0], 12707: [13708, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_concentration', 0, 0, 1, 0, 0], 12708: [13708, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0.04, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_spirit_lore', 0, 0, 1, 0, 0], 12709: [13708, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_conservation', 0, 0, 1, 0, 0], 12710: [13708, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_harmony', 0, 0, 1, 0, 0], 12900: [13711, 5, None, None, 1, 1, 0, 0, 0, 0, 0, 0, 100, 0.4, 0.4, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_shoot', 6, 1, 1, 1, 0], 12901: [13711, 5, None, None, 1, 1, 0, 0, -150, 0, -80, -20, 100, 0.25, 3, -1, 10, 1, 0, 0, 3, 1300, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_round_shot', 6, 1, 1, 1, 0], 12902: [13711, 5, 14201, None, 1.5, 1, 0, 0, -75, 0, -20, -100, 100, 0.25, 3, -1, 5, 1, 0, 0, 2, 950, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_chain_shot', 6, 1, 1, 1, 0], 12903: [13711, 5, 14202, None, 1, 1, 0, 0, -80, 0, -50, -40, 100, 0.25, 6, -1, 10, 0, 0, 0, 1, 900, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_grape_shot', 30, 1, 1, 1, 0], 12904: [13711, 5, 14203, None, 1, 1, 0, 0, -125, 0, -100, -40, 100, 0.25, 3, -1, 10, 1, 0, 0, 3, 1300, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 20, 2, 100, 0, 0.5, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cannon_firebrand', 9, 1, 1, 1, 0], 12905: [13711, 5, 14204, None, 3, 1, 0, 0, -275, 0, -240, -60, 100, 0.25, 3, -1, 0, 0, 0, 0, 1, 1700, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cannon_thunderbolt', 16, 1, 1, 1, 0], 12906: [13711, 5, 14205, None, 20, 1, 0, 0, -1500, 0, -960, -200, 90, 0.25, 10, -1, 25, 1, 0, 0, 1, 750, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cannon_explosive', 9, 1, 1, 1, 0], 12907: [13711, 5, 14206, None, 1, 1, 0, 0, -300, 0, -144, -30, 100, 0.25, 3, -1, 0, 0, 0, 0, 3, 1300, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cannon_fury', 16, 1, 1, 1, 0], 12908: [13711, 5, None, None, 3, 1, 0, 0, 0, 0, 0, 0, 100, 0.25, 3, -1, 0, 0, 0, 0, 1, 750, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_grapple_hook', 11, 1, 1, 1, 0], 12909: [13711, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'cannon_scrounger', 0, 0, 1, 0, 0], 12910: [13711, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, u'cannon_barrage', 0, 0, 1, 0, 0], 12911: [13711, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 1, 4, 2, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cannon_shrapnel', 0, 0, 1, 0, 0], 12930: [13718, 5, None, None, 1, 1, 0, 0, 0, 0, 0, 0, 100, 0.4, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_shoot', 6, 1, 1, 1, 0], 12931: [13718, 5, None, None, 1, 1, 0, 0, -150, 0, -150, -100, 100, 0.25, 1, -1, 10, 0, 0, 0, 6, 1300, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_round_shot', 6, 1, 1, 1, 0], 12932: [13718, 5, None, None, 1.5, 1, 0, 0, -75, 0, -75, -50, 100, 0.25, 2, -1, 5, 0, 0, 0, 6, 750, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_amo_can_targetedshot', 6, 1, 1, 1, 0], 12933: [13718, 5, None, None, 3, 1, 0, 0, 0, 0, 0, 0, 100, 0.25, 5, -1, 0, 0, 0, 0, 4, 1700, 0, 0, 0, 82, 0, 0, 0, 0, 0, 0, 5, 4, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_amo_can_smoke', 16, 1, 1, 1, 0], 12934: [13718, 5, None, None, 3, 1, 0, 0, 0, 0, -100, -100, 100, 0.25, 12, -1, 0, 0, 0, 0, 6, 750, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_amo_can_mine', 11, 1, 1, 1, 0], 12935: [13718, 5, None, None, 1, 1, 0, 0, -125, 0, -125, -80, 100, 0.25, 1, -1, 10, 0, 0, 0, 3, 1300, 0, 0, 0, 81, 0, 0, 0, 0, 0, 1, 20, 2, -1, 0, 0.5, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cannon_firebrand', 9, 1, 1, 1, 0], 12936: [13718, 5, None, None, 1, 1, 0, 0, -150, 0, -150, -50, 100, 0.25, 2, -1, 10, 0, 0, 0, 6, 1300, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_amo_can_scatter', 30, 1, 1, 1, 0], 12937: [13718, 5, None, None, 1, 1, 0, 0, 0, 0, 0, 0, 100, 0.25, 24, -1, 10, 0, 0, 0, 1, 1300, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0.5, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pir_t_gui_amo_can_powderKeg', 9, 1, 1, 1, 0], 12938: [13718, 5, None, None, 1, 1, 0, 0, -400, 0, -400, -320, 100, 0.25, 1, -1, 0, 0, 0, 0, 3, 5000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pir_t_gui_amo_can_bullet', 16, 1, 1, 1, 0], 12939: [13718, 5, None, None, 1, 1, 0, 0, -100, 0, -300, -200, 100, 0.25, 12, -1, 0, 0, 0, 0, 3, 1300, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pir_t_gui_amo_can_iceshot', 16, 1, 1, 1, 0], 12940: [13718, 5, None, None, 1, 1, 0, 0, -300, 0, -300, -150, 100, 0.25, 4, -1, 70, 0, 0, 0, 4, 1300, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cannon_explosive', 16, 1, 1, 1, 0], 12941: [13718, 5, None, None, 1, 1, 0, 0, -300, 0, -2440, -2440, 100, 0.25, 14, -1, 0, 0, 0, 0, 2, 1300, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pir_t_gui_amo_can_chum', 16, 1, 1, 1, 0], 12942: [13718, 5, None, None, 1, 1, 0, 0, -500, 0, -500, -100, 100, 0.25, 
8, -1, 200, 0, 0, 0, 1, 1300, 1, 0, 0, 81, 0, 0, 0, 0, 0, 0, 20, 2, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pir_t_gui_amo_can_firestorm', 16, 1, 1, 1, 0], 12943: [13718, 5, None, None, 1, 1, 0, 0, 0, 0, 0, 0, 100, 0.25, 0, -1, 0, 0, 0, 0, 3, 1300, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cannon_round_shot', 16, 1, 1, 1, 0], 12944: [13718, 5, None, None, 3, 1, 0, 0, 0, 0, -1200, -1200, 100, 0.25, 24, -1, 0, 0, 0, 0, 1, 750, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_amo_can_mine', 11, 1, 1, 1, 0], 12945: [13718, 5, None, None, 3, 1, 0, 0, 0, 0, -800, -800, 100, 0.25, 0, -1, 0, 0, 0, 0, 6, 750, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_amo_can_mine', 11, 1, 1, 1, 0], 12946: [13718, 5, None, None, 1, 1, 0, 0, -100, 0, -100, 0, 100, 0.25, 0, -1, 0, 0, 0, 0, 3, 1300, 0, 0, 0, 83, 0, 0, 0, 0, 0, 0, 5, 4, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pir_t_gui_amo_can_iceshot', 16, 1, 1, 1, 0], 'columnHeadings': {u'ANIM_TIME_INDEX': 54, u'NPC_MODIFIER': 53, u'REPUTATION_INDEX': 4, u'SELF_HP_INDEX': 6, u'NEED_SIGHT': 30, u'ATTACK_CLASS_INDEX': 1, u'INSTANT_INDEX': 27, u'SKILL_ICON_INDEX': 50, u'AREA_EFFECT_INDEX': 16, u'ACCURACY_INDEX': 12, u'FREE_INDEX': 48, u'SHIP_ACCEL_INDEX': 41, u'DURATION_INDEX': 32, u'MAX_CHARGE_INDEX': 35, u'SELF_MOJO_INDEX': 7, u'REPUTATION_CATEGORY_INDEX': 0, u'LINKED_SKILL': 3, u'NEED_TARGET_INDEX': 28, u'PROJECTILE_POWER_INDEX': 21, u'USABLE_IN_AIR': 40, u'SKILL_TYPE': 38, u'TARGET_MOJO_INDEX': 9, u'REACTION_DELAY_INDEX': 36, u'SHOUT_INDEX': 47, u'FRIENDLY_FIRE_INDEX': 19, u'PVP_INDEX': 49, u'SPLIT_TARGET_INDEX': 29, u'UPGRADE_INDEX': 13, u'RECUR_INDEX': 33, u'UNATTUNE_INDEX': 24, u'SHIP_MAXTURN_INDEX': 46, u'EFFECT_FLAG_INDEX': 25, u'SELF_USE_INDEX': 18, u'AREA_EFFECT_SELF_DAMAGE_INDEX': 17, u'AMMO_INVENTORY_TYPE': 2, u'IS_PROJECTILE_INDEX': 5, u'HOSTILE_BUFF': 31, u'SKILL_TRACK_INDEX': 39, u'TARGET_HP_INDEX': 8, u'INTERRUPT_INDEX': 23, u'SHIP_REVACCEL_INDEX': 43, u'CENTER_VFX': 52, u'MAX_QUANTITY_INDEX': 34, u'SHIP_MAXSPEED_INDEX': 42, u'NUM_HIT_INDEX': 37, u'AREA_SHAPE_INDEX': 22, u'HULL_HP_INDEX': 10, u'DELAY_INDEX': 26, u'SHIP_TURNRATE_INDEX': 45, u'HIT_VFX': 51, u'VOLLEY_INDEX': 20, u'SAIL_HP_INDEX': 11, u'RECHARGE_INDEX': 14, u'RANGE_INDEX': 15, u'SHIP_REVMAXSPEED_INDEX': 44, u'NEED_AREA_ANIM_INDEX': 55}, 12400: [13706, 1, None, None, 1, 0, 0, 0, -9, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.2, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'dagger_cut', 11, 1, 0.75, 0.75, 0], 12401: [13706, 1, None, None, 2, 0, 0, 0, -26, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.2, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'dagger_gouge', 12, 1, 1, 0.583, 0], 12402: [13706, 1, None, None, 2, 0, 0, 0, -44, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.56, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_swipe', 13, 1, 1.5, 0.958, 0], 12403: [13706, 1, None, None, 3, 0, 0, 0, -68, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.2, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_impale', 14, 1, 2, 1.5, 0], 12404: [13706, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 0, 0, 1, 0, 0], 13000: [0, 0, None, None, 
0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 50, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0.4, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_ico_pot_tonic', 0, 1, 1, 0.94, 0], 13001: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0.4, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_ico_pot_tonic', 0, 1, 1, 0.94, 0], 3000: [13713, 1, None, None, 1, 0, 0, 0, -150, 0, 0, 0, 100, 0.25, 2, 8, 0, 0, 0, 0, 1, 0, 0, 1, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_cleave', 5, 1, 1, 1, 0], 3001: [13713, 2, None, None, 1, 0, 0, 0, -80, 0, 0, 0, 100, 0.25, 2, 100, 0, 0, 0, 0, 1, 0, 0, 1, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_concentration', 0, 1, 0.75, 1, 0], 3002: [13713, 3, None, None, 1, 0, 0, 0, -30, 0, 0, 0, 100, 0.25, 30, 60, 60, 0, 0, 0, 1, 0, 1, 1, 1, 8, 0, 0, 0, 0, 0, 1, 20, 0, -1, 0, 0.4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_shackle', 40, 1, 0.25, 1, 0], 3003: [13713, 3, None, None, 1, 0, 0, -800, -500, 0, 0, 0, 100, 0.25, 30, 50, 40, 0, 0, 0, 1, 0, 1, 1, 1, 26, 0, 0, 0, 0, 0, 1, 5, 0, -1, 0, 0.4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_shockwave', 40, 1, 4, 1, 0], 3004: [13713, 3, None, None, 1, 0, 0, -200, 0, 0, 0, 0, 100, 0.25, 30, 60, 0, 0, 0, 0, 1, 0, 0, 0, 0, 38, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0.4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_curse', 0, 1, 0, 1, 0], 3005: [13713, 2, None, None, 1, 0, 0, 0, -120, 0, 0, 0, 100, 0.25, 6, 80, 25, 0, 0, 0, 1, 0, 1, 1, 1, 6, 0, 0, 1, 0, 0, 1, 10, 0, -1, 0, 0.4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_explosion', 0, 1, 1.5, 1, 0], 3006: [13713, 3, None, None, 1, 0, 0, 0, -60, 0, 0, 0, 100, 0.25, 15, 70, 60, 0, 0, 0, 1, 0, 1, 0, 0, 39, 0, 0, 0, 0, 0, 1, 20, 2, -1, 0, 0.4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_harmony', 40, 1, 1, 1, 0], 3007: [13713, 1, None, None, 1, 0, 0, 0, -600, 0, 0, 0, 100, 0.25, 10, 8, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_bladestorm', 10, 1, 5, 1, 0], 11200: [0, 0, 15001, None, 0, 0, 200, 50, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_ico_pot_tonic', 38, 1, 1, 0, 0], 11201: [0, 0, 15002, None, 0, 0, 350, 100, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_ico_pot_tonic', 38, 1, 1, 0, 0], 11202: [0, 0, 15003, None, 0, 0, 650, 250, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_ico_pot_holyWater', 38, 1, 1, 0, 0], 11203: [0, 0, 15004, None, 0, 0, 1000, 375, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_ico_pot_elixir', 38, 1, 1, 0, 0], 11204: [0, 0, 15005, None, 0, 0, 1500, 500, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_ico_pot_miracleWater', 38, 1, 1, 0, 0], 11205: [0, 0, 15006, None, 0, 0, 0, 0, 0, 0, 5000, 2000, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 28, 0, 1, 1, 0, 0, 0, 0, 0, 2, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'sail_come_about', 0, 1, 1, 0, 0], 11206: [0, 0, 15007, None, 0, 0, 2000, 800, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 5, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_ico_pot_porkTonic', 38, 1, 1, 0, 0], 11207: [0, 0, 15008, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 41, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_cannonDmgUp', 38, 1, 1, 0, 0], 11208: [0, 0, 15009, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 42, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_cannonDmgUp', 38, 1, 1, 0, 0], 11209: [0, 0, 15010, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 43, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_cannonDmgUp', 38, 1, 1, 0, 0], 11210: [0, 0, 15011, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 44, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_gunDmgUp', 38, 1, 1, 0, 0], 11211: [0, 0, 15012, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 45, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_gunDmgUp', 38, 1, 1, 0, 0], 11212: [0, 0, 15013, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 46, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_gunDmgUp', 38, 1, 1, 0, 0], 11213: [0, 0, 15014, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 47, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_swordDmgUp', 38, 1, 1, 0, 0], 11214: [0, 0, 15015, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 48, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_swordDmgUp', 38, 1, 1, 0, 0], 11215: [0, 0, 15016, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 49, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_swordDmgUp', 38, 1, 1, 0, 0], 11216: [0, 0, 15017, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 50, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_voodooDmgUp', 38, 1, 1, 0, 0], 11217: [0, 0, 15018, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 51, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_voodooDmgUp', 38, 1, 1, 0, 0], 11218: [0, 0, 15019, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 52, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_voodooDmgUp', 38, 1, 1, 0, 0], 11219: [0, 0, 15020, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 53, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_haste', 38, 1, 1, 0, 0], 11220: [0, 0, 15021, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 54, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_haste', 38, 1, 1, 0, 0], 11221: [0, 0, 15022, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 55, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_haste', 38, 1, 1, 0, 0], 11222: [0, 0, 15023, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 56, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_repBoost', 38, 1, 1, 0, 0], 11223: [0, 0, 15024, None, 0, 
0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 57, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_repBoost', 38, 1, 1, 0, 0], 11224: [0, 0, 15025, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 58, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_gold', 38, 1, 1, 0, 0], 11225: [0, 0, 15026, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 59, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_gold', 38, 1, 1, 0, 0], 11226: [0, 0, 15027, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 60, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_invisibility', 38, 1, 1, 0, 0], 11227: [0, 0, 15028, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 61, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_invisibility', 38, 1, 1, 0, 0], 11228: [0, 0, 15029, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 62, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_healthRegeneration', 38, 1, 1, 0, 0], 11229: [0, 0, 15030, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 63, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_healthRegeneration', 38, 1, 1, 0, 0], 11230: [0, 0, 15031, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 64, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_healthRegeneration', 38, 1, 1, 0, 0], 11231: [0, 0, 15032, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 65, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_healthRegeneration', 38, 1, 1, 0, 0], 11232: [0, 0, 15033, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 66, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_burp', 38, 1, 1, 0, 0], 11233: [0, 0, 15034, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 67, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_fart', 38, 1, 1, 0, 0], 11234: [0, 0, 15035, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 68, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_puke', 38, 1, 1, 0, 0], 11235: [0, 0, 15036, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 69, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_headIncrease', 38, 1, 1, 0, 0], 11236: [0, 0, 15037, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 70, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_faceColor', 38, 1, 1, 0, 0], 11237: [0, 0, 15038, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 71, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_avatarDecrease', 38, 1, 1, 0, 0], 11238: [0, 0, 15039, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 72, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_avatarIncrease', 38, 1, 1, 0, 0], 11239: [0, 0, 15040, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 73, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 
1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_headOnFire', 38, 1, 1, 0, 0], 11240: [0, 0, 15041, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 74, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_scorpionTransform', 38, 1, 1, 0, 0], 11241: [0, 0, 15042, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 75, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_alligatorTransform', 38, 1, 1, 0, 0], 11242: [0, 0, 15043, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 76, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_crabTransform', 38, 1, 1, 0, 0], 11243: [0, 0, 15044, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 77, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_targeted', 38, 1, 1, 0, 0], 11244: [0, 0, 15045, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 78, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_targeted', 38, 1, 1, 0, 0], 11245: [0, 0, 15046, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 79, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_targeted', 38, 1, 1, 0, 0], 11246: [0, 0, 15047, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 80, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_removeGroggy', 38, 1, 1, 0, 0], 11247: [0, 0, 15048, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 84, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_repBoost', 38, 1, 1, 0, 0], 11248: [0, 0, 15049, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 85, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_fart', 38, 1, 1, 0, 0], 11251: [0, 0, 15052, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 88, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_removeGroggy', 38, 1, 1, 0, 0], 11252: [0, 0, 15053, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 89, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_pot_repBoost', 38, 1, 1, 0, 0], 3100: [13713, 1, None, None, 1, 0, 0, -30, -75, -30, 0, 0, 100, 0.25, 0.5, 6, 25, 0, 0, 0, 1, 0, 1, 0, 0, 26, 0, 0, 0, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_curse', 31, 1, 1, 2, 0], 3101: [13713, 1, None, None, 1, 0, 0, 0, -10000, 0, 0, 0, 100, 0.25, 0.5, 7, 25, 0, 0, 0, 1, 0, 1, 0, 0, 26, 0, 0, 0, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_curse', 10, 1, 100, 2, 0], 3102: [13703, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 180, 200, 0, 0, 1, 0, 0, 0, 0, 0, 0, 201, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_curse', 0, 0, 1, 1, 0], 11300: [0, 0, 11300, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_fsh_stall', 38, 1, 1, 0, 0], 11301: [0, 0, 11301, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'pir_t_gui_fsh_sink', 38, 1, 1, 0, 0], 3200: [13713, 4, None, None, 1, 
0, 0, 0, -75, 0, 0, 0, 100, 0.25, 0, 0, 7, 0, 0, 0, 1, 0, 1, 1, 1, 156, 0, 0, 0, 0, 0, 1, 3, 0, -1, 0, 0.1, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_explosion', 9, 1, 10, 1, 0], 3300: [13713, 4, None, None, 1, 0, 0, 0, -1000, 0, 0, 0, 100, 0.25, 2, 8, 30, 0, 0, 0, 1, 0, 1, 1, 1, 156, 0, 0, 0, 0, 0, 1, 6, 0, -1, 0, 0.4, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_explosion', 42, 1, 50, 1, 0], 3301: [13713, 4, None, None, 1, 0, 0, 0, -1000, 0, 0, 0, 100, 0.25, 2, 8, 30, 0, 0, 0, 1, 0, 1, 1, 1, 156, 0, 0, 0, 0, 0, 1, 6, 0, -1, 0, 0.4, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_explosion', 42, 1, 50, 1, 0], 2260: [13709, 3, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'voodoo_hasten', 0, 2, 1, 2.625, 0], 2261: [13709, 3, None, None, 4, 0, 0, -8, -65, 0, 0, 0, 100, 0.25, 4, 130, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.6, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'voodoo_poke', 20, 2, 0.5, 2.08, 0], 3400: [13712, 0, None, None, 4, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 60, -1, 150, 1, 0, 1, 0, 0, 1, 0, 0, 108, 0, 0, 0, 0, 0, 0, 6, 0, -1, 0, 0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, u'sail_broadside_right', 0, 1, 1, 0, 0], 3401: [13712, 0, None, None, 4, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 60, -1, 150, 1, 0, 1, 0, 0, 1, 0, 0, 109, 0, 0, 0, 0, 0, 0, 6, 0, -1, 0, 0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, u'cannon_chain_shot', 0, 1, 1, 0, 0], 3402: [13712, 0, None, None, 4, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 60, -1, 150, 1, 0, 1, 0, 0, 1, 0, 0, 110, 0, 0, 0, 0, 0, 0, 6, 0, -1, 0, 0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, u'grenade_explosion', 0, 1, 1, 0, 0], 3403: [13712, 0, None, None, 4, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 45, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 10, 0, -1, 0, 0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, u'grenade_demolition', 0, 1, 1, 0, 0], 3404: [13712, 0, None, None, 4, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 45, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 112, 0, 0, 0, 0, 0, 0, 15, 0, -1, 0, 0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, u'sail_tacking', 0, 1, 1, 0, 0], 3500: [0, 0, None, None, 0, 0, 0, -50, 0, 0, 0, 0, 100, 0.25, 30, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_conservation', 43, 2, 1, 2.625, 0], 3501: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 30, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 158, 0, 0, 0, 0, 0, 0, 20, 0, -1, 0, 0, 1, 1, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_free', 0, 2, 1, 2.625, 0], 3502: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_parry', 0, 2, 1, 2.625, 0], 3503: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_concentration', 0, 2, 1, 2.625, 0], 3504: [0, 0, None, None, 0, 0, 0, 0, 400, 0, 0, 0, 100, 0.25, 20, 0, 20, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, -1, 0, 0, 1, 1, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_determination2', 0, 2, 1, 2.625, 0], 3505: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 30, 12, 0, 0, 0, 0, 1, 0, 0, 0, 0, 160, 0, 0, 1, 0, 0, 1, 30, 0, -1, 0, 0.5, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'sail_openfire2', 0, 1, 1, 0.8, 0], 3506: [0, 0, None, None, 0, 0, 0, -50, 0, 0, 0, 0, 100, 0.25, 30, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 165, 0, 0, 0, 0, 0, 0, 10, 0, -1, 0, 
0, 1, 1, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_focus', 0, 2, 1, 1, 0], 3507: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, -1, 0, 0, 1, 1, 6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_focus', 0, 2, 1, 1, 0], 3508: [0, 0, None, None, 0, 0, 0, 0, 300, 0, 0, 0, 100, 0.25, 30, -1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 161, 0, 0, 0, 0, 0, 0, 20, 0, -1, 0, 0, 1, 1, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_toughness', 45, 2, 1, 0.625, 0], 3509: [0, 0, None, None, 0, 0, 250, 50, 0, 0, 0, 0, 100, 0.25, 30, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, u'voodoo_cure', 38, 2, 1, 2.625, 0], 3510: [0, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 30, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 168, 0, 0, 0, 0, 0, 0, 12, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_life_drain', 0, 2, 1, 2.625, 0], 3600: [0, 4, None, None, 1, 0, 0, 0, -1000, 0, 0, 0, 100, 0.25, 2, 8, 30, 0, 0, 0, 1, 0, 1, 1, 1, 156, 0, 0, 0, 0, 0, 1, 6, 0, -1, 0, 0.4, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_explosion', 42, 1, 50, 1, 0], 3601: [0, 1, None, None, 1, 0, 0, 0, -5, 0, 0, 0, 80, 0.25, 1, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_stab', 0, 1, 1, 1, 0], 3602: [0, 1, None, None, 1, 0, 0, 0, -12, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 3, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_stab', 1, 1, 0.75, 0.625, 0], 3701: [13703, 1, None, None, 4, 0, 0, -80, -150, 0, 0, 0, 100, 0.25, 60, 6, 15, 0, 0, 0, 1, 0, 1, 0, 0, 163, 0, 0, 1, 0, 0, 1, 10, 2, -1, 0, 0.6, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pir_t_gui_amo_can_firestorm', 9, 1, 1.25, 1.42, 0], 3702: [13703, 1, None, None, 4, 0, 0, -120, -150, 0, 0, 0, 100, 0.25, 60, 6, 15, 0, 0, 0, 1, 0, 1, 0, 0, 164, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'dagger_blade_instinct', 31, 1, 1.25, 1.42, 0], 3703: [13703, 1, None, None, 4, 0, 0, -160, -300, 0, 0, 0, 100, 0.25, 60, 6, 15, 0, 0, 0, 1, 0, 1, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'staff_lightning', 44, 1, 1.25, 1.42, 0], 3008: [13713, 3, None, None, 1, 0, 0, 0, -400, 0, 0, 0, 100, 0.25, 5, 200, 0, 0, 0, 0, 1, 0, 0, 1, 1, 2, 0, 0, 1, 0, 0, 1, 20, 2, -1, 0, 0.4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_lightning', 41, 1, 4, 1, 0], 2330: [13703, 1, None, None, 2, 0, 0, 0, -60, 0, 0, 0, 95, 0.25, 3, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 10, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0.22, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_cleave', 6, 1, 1.5, 1, 0], 2331: [13703, 1, None, None, 2, 0, 0, 0, -80, 0, 0, 0, 80, 0.25, 30, 12, 0, 0, 0, 0, 1, 0, 3, 0, 0, 4, 0, 0, 1, 0, 0, 1, 10, 2, -1, 0, 0.22, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_finesse', 10, 1, 1.5, 1, 0], 2332: [13703, 1, None, None, 2, 0, 0, 0, -60, 0, 0, 0, 90, 0.25, 3, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 33, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.22, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_cleave', 1, 1, 1.5, 1, 0], 2333: [13703, 1, None, None, 1, 0, 0, -40, -50, 0, 0, 0, 100, 0.25, 15, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 163, 0, 0, 1, 0, 0, 1, 5, 2, -1, 0, 0.22, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cannon_flaming_skull', 9, 1, 0.75, 0.625, 0], 12000: [13702, 1, None, None, 1, 0, 0, 0, -5, 0, 0, 0, 80, 0.25, 1, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_stab', 0, 1, 
1, 1, 0], 12100: [13703, 1, None, None, 1, 0, 0, 0, -12, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.22, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_hack', 1, 1, 0.75, 0.625, 0], 12101: [13703, 1, None, None, 2, 0, 0, 0, -33, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.46, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_slash', 2, 1, 1, 0.75, 0], 12102: [13703, 1, None, None, 2, 0, 0, 0, -45, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.7, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_cleave', 5, 1, 1.25, 1.125, 0], 12103: [13703, 1, None, None, 4, 0, 0, 0, -80, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_flourish', 4, 1, 1.5, 1.58, 0], 12104: [13703, 1, None, None, 6, 0, 0, 0, -110, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 3, 1, 1.75, 1.3, 0], 12105: [13703, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_parry', 0, 0, 1, 0, 0], 12106: [13703, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'grenade_toughness', 0, 0, 1, 0, 0], 12107: [13703, 1, None, None, 2, 0, 0, 0, -60, 0, 0, 0, 100, 0.25, 15, 6, 15, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.6, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_sweep', 29, 1, 1.25, 1.42, 0], 12108: [13703, 1, None, None, 5, 0, 0, 0, -25, 0, 0, 0, 100, 0.25, 60, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 6, 0, 0, 1, 0, 0, 1, 2.5, 0, -1, 0, 0.5, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_brawl', 6, 1, 0.75, 1.41, 0], 12109: [13703, 1, None, None, 8, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 30, 12, 0, 0, 0, 0, 1, 0, 0, 0, 0, 10, 0, 0, 1, 0, 0, 1, 30, 0, -1, 0, 0.5, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_taunt', 15, 1, 1, 1.625, 0], 12110: [13703, 1, None, None, 10, 0, 0, 0, -200, 0, 0, 0, 100, 0.25, 30, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0.1, 5, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_bladestorm', 7, 1, 5, 4.16, 0], 2360: [13703, 1, None, 12100, 1, 0, 0, 0, -9, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.19, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_hack', 55, 1, 0.5, 0.225, 0], 2361: [13703, 1, None, 12101, 2, 0, 0, 0, -24, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.12, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_slash', 56, 1, 1.025, 0.2, 0], 2362: [13703, 1, None, 12102, 2, 0, 0, 0, -34, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.7, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'cutlass_cleave', 57, 1, 0.875, 0.475, 0], 2363: [13703, 1, None, 12103, 4, 0, 0, 0, -60, 0, 0, 0, 100, 0.25, 0.5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.6, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_flourish', 58, 1, 0.6, 0.65, 0], 12604: [13709, 3, None, None, 15, 0, 0, -30, 0, 0, 0, 0, 100, 0.25, 30, 130, 0, 0, 0, 0, 1, 0, 0, 0, 0, 12, 0, 0, 1, 0, 0, 1, 60, 0, -1, 0, 0.1, 1, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'voodoo_curse', 26, 2, 1, 2.625, 0], 2370: [0, 0, None, None, 
0, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'staff_blast', 45, 2, 1, 2.625, 0], 12200: [13704, 2, None, None, 2, 0, 0, 0, -15, 0, 0, 0, 80, 0.25, 0.5, 70, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pistol_shoot', 0, 0, 1, 0.55, 0], 12201: [13704, 2, None, None, 1, 0, 0, 0, -20, 0, 0, 0, 80, 0.25, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'lead', 0, 0, 1, 0, 0], 12202: [13704, 2, 13901, None, 2, 0, 0, 0, -25, 0, 0, 0, 80, 0.25, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 5, 0, 0, 1, 0, 1, 1, 10, 2, 50, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'venom', 8, 0, 1, 0, 0], 12203: [13704, 2, 13902, None, 2, 0, 0, 0, -35, 0, 0, 0, 80, 0.25, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 14, 0, 0, 1, 0, 1, 1, 4, 0, 50, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'steel', 0, 0, 1, 0, 0], 12204: [13704, 2, 13903, None, 2, 0, 0, 0, -20, -35, 0, 0, 80, 0.25, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 50, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'gold', 0, 0, 1, 0, 0], 12205: [13704, 2, 13904, None, 3, 0, 0, 0, -50, 0, 0, 0, 80, 0.25, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 16, 0, 0, 1, 0, 1, 0, 0, 0, 50, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'silver', 0, 0, 1, 0, 0], 12206: [13704, 2, 13905, None, 3, 0, 0, 0, -55, 0, 0, 0, 80, 0.25, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 17, 0, 0, 1, 0, 1, 0, 0, 0, 50, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'iron', 0, 0, 1, 0, 0], 12207: [13704, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pistol_sharp_shooter', 0, 0, 1, 0, 0], 12208: [13704, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'dagger_dodge', 0, 0, 1, 0, 0], 12209: [13704, 0, None, None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'pistol_eagle_eye', 0, 0, 1, 0, 0], 12210: [13704, 2, None, None, 5, 0, 0, 0, -15, 0, 0, 0, 110, 0.25, 1, 100, 0, 0, 0, 0, 1, 0, 0, 0, 0, 7, 0, 0, 1, 0, 1, 1, 3, 0, -1, 2, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, u'pistol_take_aim', 0, 0, 1, 0.55, 0], 2000: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 1, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2001: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 1, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2002: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 2, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 1, 1, 3, 0, -1, 0, 0.1, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 2, 1, 2, 0, 0], 2020: [13713, 3, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 70, 0.25, 5, 150, 0, 0, 0, 0, 1, 0, 0, 0, 0, 21, 0, 0, 1, 0, 0, 1, 20, 2, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 8, 1, 1, 0, 0], 2021: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 70, 0.25, 8, 115, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 4, 0, -1, 0, 1.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2022: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 
0, 100, 0.25, 20, 110, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 4, 0, -1, 0, 2, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 0.25, 0, 0], 2023: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 8, 110, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 4, 0, -1, 0, 0.4, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 0.5, 0, 0], 2024: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 5, 110, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 4, 0, -1, 0, 3.2, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 3, 0, 0], 2025: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 5, 115, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 4, 0, -1, 0, 1.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1.5, 0, 0], 2026: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 90, 0.25, 10, 6, 10, 0, 0, 0, 1, 0, 1, 0, 0, 26, 0, 0, 1, 0, 0, 1, 4, 0, -1, 0, 0.3, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 0.75, 0, 0], 2040: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 30, 15, 0, 0, 0, 0, 1, 0, 3, 0, 0, 7, 0, 0, 1, 0, 0, 1, 5, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2041: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 30, 15, 0, 0, 0, 0, 1, 0, 3, 0, 0, 7, 0, 0, 1, 0, 0, 1, 5, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1, 0, 0], 2042: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 2, 10, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 0.75, 0, 0], 2043: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 2, 10, 0, 0, 0, 0, 1, 0, 0, 0, 0, 26, 0, 0, 1, 0, 0, 1, 3, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 0.75, 0, 0], 2044: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 5, 12, 0, 0, 0, 0, 1, 0, 2, 0, 0, 7, 0, 0, 1, 0, 0, 1, 10, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1.25, 0, 0], 2045: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0.25, 5, 12, 0, 0, 0, 0, 1, 0, 2, 0, 0, 7, 0, 0, 1, 0, 0, 1, 10, 0, -1, 0, 0.1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 0, 1, 1.25, 0, 0], 2046: [13713, 1, None, None, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0.25, 15, 25, 25, 0, 0, 0, 1, 0, 1, 0, 0, 156, 0, 0, 1, 0, 0, 1, 6, 0, -1, 0, 1.43, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, u'cutlass_stab', 42, 1, 0.25, 0, 0]}
__columnHeadings = skillInfo.pop('columnHeadings')
for (heading, value) in __columnHeadings.items():
    globals()[heading] = value
del __columnHeadings
"[email protected]"
] | |
447deb31d2d1a1b72324352aea01dbd53f57f36d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03264/s772913299.py | cde5df2d33feac6966bd5aed1a9f6818692d8236 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | def ip():return int(input())
k = ip()
# 1..k contains k // 2 even and (k + 1) // 2 odd numbers; the answer is
# the number of ways to pair one of each.
print((k + 1) // 2 * (k // 2))
"[email protected]"
] | |
93562f7a23b36ff28a4168abcecb72557e221173 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/ankiandroid/testcase/interestcases/testcase0_33_2_013.py | 6e49203500865fc74ce0dd3171d513841a9833e8 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,166 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
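# Appium session settings: drive AnkiDroid on an Android 4.4 emulator with
# JaCoCo instrumentation so the run produces coverage data.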
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.ichi2.anki',
'appActivity' : 'com.ichi2.anki.IntentHandler',
'resetKeyboard' : True,
'androidCoverage' : 'com.ichi2.anki/com.ichi2.anki.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
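    # Run `cmd` in a shell, let it work for `timeout` seconds, then kill it.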
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
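    # Look up a single element by UiAutomator selector, retrying for a few
    # seconds; as a last resort tap (50, 50) to dismiss overlays and retry.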
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
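    # Like getElememt, but falls back from selector str1 to str2.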
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
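    # Swipe between two points expressed as fractions of the window size,
    # retrying once if the first gesture fails.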
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
return
def scrollToFindElement(driver, str) :
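    # Scroll down and then back up the screen until the selector matches,
    # preferring an enabled element when several match.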
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1) :
for temp in elements :
if temp.get_attribute("enabled") == "true" :
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.55, 0.5, 0.2)
else :
return element
for i in range(0, 4, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1):
for temp in elements:
if temp.get_attribute("enabled") == "true":
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.2, 0.5, 0.55)
else :
return element
return
def scrollToClickElement(driver, str) :
element = scrollToFindElement(driver, str)
if element is None :
return
else :
element.click()
def clickInList(driver, str) :
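    # Pick an entry in a popup list; with no selector, tap the last checked
    # text view, and press BACK if nothing clickable is found in a dialog.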
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
element.click()
else :
if checkWindow(driver) :
driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
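    # Toggle the checkable widget that shares a LinearLayout row with the
    # given label until its "checked" attribute equals `value`.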
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
def typeText(driver, value) :
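    # Fill the focused EditText and confirm via the OK button, or press
    # BACK if no OK button is present.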
element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
element.clear()
element.send_keys(value)
enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
if (enterelement is None) :
if checkWindow(driver):
driver.press_keycode(4)
else :
enterelement.click()
def checkWindow(driver) :
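    # A dialog is showing when the root FrameLayout is shorter than the
    # full window height.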
dsize = driver.get_window_size()
nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
if dsize['height'] > nsize['height']:
return True
else :
return False
# preference setting and exit
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
os.popen("adb shell am start -n com.ichi2.anki/com.ichi2.anki.Preferences -a test")
scrollToClickElement(driver, "new UiSelector().text(\"Advanced\")")
scrollToClickElement(driver, "new UiSelector().text(\"Type answer into the card\")")
clickOnCheckable(driver, "new UiSelector().text(\"Type answer into the card\")", "false")
scrollToClickElement(driver, "new UiSelector().text(\"Lookup dictionary\")")
clickInList(driver, "new UiSelector().text(\"None\")")
time.sleep(1)
driver.press_keycode(4)
scrollToClickElement(driver, "new UiSelector().text(\"AnkiDroid\")")
scrollToClickElement(driver, "new UiSelector().text(\"Share feature usage\")")
clickOnCheckable(driver, "new UiSelector().text(\"Share feature usage\")", "false")
scrollToClickElement(driver, "new UiSelector().text(\"Notify when\")")
clickInList(driver, "new UiSelector().text(\"Never notify\")")
time.sleep(1)
driver.press_keycode(4)
scrollToClickElement(driver, "new UiSelector().text(\"Reviewing\")")
scrollToClickElement(driver, "new UiSelector().text(\"Keep screen on\")")
clickOnCheckable(driver, "new UiSelector().text(\"Keep screen on\")", "false")
scrollToClickElement(driver, "new UiSelector().text(\"Fullscreen mode\")")
clickInList(driver, "new UiSelector().text(\"Off\")")
scrollToClickElement(driver, "new UiSelector().text(\"Answer buttons position\")")
clickInList(driver, "new UiSelector().text(\"Top\")")
driver.press_keycode(4)
time.sleep(2)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"2_013_pre\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
# testcase013
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"Default\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"0\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.ichi2.anki:id/action_empty\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.ichi2.anki:id/action_rebuild\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"Custom study session\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.ichi2.anki:id/action_empty\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"0\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Import\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"2_013\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'com.ichi2.anki'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
# --- SoutiRini/Top-20-Python-Libraries: stuff/finished libraries/matplotlib/test_path.py ---
import copy
import re
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from matplotlib import patches
from matplotlib.path import Path
from matplotlib.patches import Polygon
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.backend_bases import MouseEvent
def test_empty_closed_path():
path = Path(np.zeros((0, 2)), closed=True)
assert path.vertices.shape == (0, 2)
assert path.codes is None
def test_readonly_path():
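    # Paths built by factories like unit_circle() are marked read-only.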
path = Path.unit_circle()
def modify_vertices():
path.vertices = path.vertices * 2.0
with pytest.raises(AttributeError):
modify_vertices()
def test_path_exceptions():
bad_verts1 = np.arange(12).reshape(4, 3)
with pytest.raises(ValueError,
match=re.escape(f'has shape {bad_verts1.shape}')):
Path(bad_verts1)
bad_verts2 = np.arange(12).reshape(2, 3, 2)
with pytest.raises(ValueError,
match=re.escape(f'has shape {bad_verts2.shape}')):
Path(bad_verts2)
good_verts = np.arange(12).reshape(6, 2)
bad_codes = np.arange(2)
msg = re.escape(f"Your vertices have shape {good_verts.shape} "
f"but your codes have shape {bad_codes.shape}")
with pytest.raises(ValueError, match=msg):
Path(good_verts, bad_codes)
def test_point_in_path():
# Test #1787
verts2 = [(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]
path = Path(verts2, closed=True)
points = [(0.5, 0.5), (1.5, 0.5)]
ret = path.contains_points(points)
assert ret.dtype == 'bool'
np.testing.assert_equal(ret, [True, False])
def test_contains_points_negative_radius():
path = Path.unit_circle()
points = [(0.0, 0.0), (1.25, 0.0), (0.9, 0.9)]
result = path.contains_points(points, radius=-0.5)
np.testing.assert_equal(result, [True, False, False])
_test_paths = [
# interior extrema determine extents and degenerate derivative
Path([[0, 0], [1, 0], [1, 1], [0, 1]],
[Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]),
# a quadratic curve
Path([[0, 0], [0, 1], [1, 0]], [Path.MOVETO, Path.CURVE3, Path.CURVE3]),
# a linear curve, degenerate vertically
Path([[0, 1], [1, 1]], [Path.MOVETO, Path.LINETO]),
# a point
Path([[1, 2]], [Path.MOVETO]),
]
_test_path_extents = [(0., 0., 0.75, 1.), (0., 0., 1., 0.5), (0., 1., 1., 1.),
(1., 2., 1., 2.)]
@pytest.mark.parametrize('path, extents', zip(_test_paths, _test_path_extents))
def test_exact_extents(path, extents):
# notice that if we just looked at the control points to get the bounding
# box of each curve, we would get the wrong answers. For example, for
# hard_curve = Path([[0, 0], [1, 0], [1, 1], [0, 1]],
# [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4])
# we would get that the extents area (0, 0, 1, 1). This code takes into
# account the curved part of the path, which does not typically extend all
# the way out to the control points.
# Note that counterintuitively, path.get_extents() returns a Bbox, so we
# have to get that Bbox's `.extents`.
assert np.all(path.get_extents().extents == extents)
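# Worked check for the first case above (a sketch added for clarity, not part
# of the upstream test): that cubic has control points (0,0), (1,0), (1,1),
# (0,1), so
#   x(t) = 3t(1-t)^2 + 3t^2(1-t) = 3t(1-t)   -> max at t = 1/2, x = 3/4
#   y(t) = 3t^2(1-t) + t^3 = 3t^2 - 2t^3     -> max on [0, 1] at t = 1, y = 1
# which reproduces the expected extents (0., 0., 0.75, 1.).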
def test_point_in_path_nan():
box = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])
p = Path(box)
test = np.array([[np.nan, 0.5]])
contains = p.contains_points(test)
assert len(contains) == 1
assert not contains[0]
def test_nonlinear_containment():
fig, ax = plt.subplots()
ax.set(xscale="log", ylim=(0, 1))
polygon = ax.axvspan(1, 10)
assert polygon.get_path().contains_point(
ax.transData.transform((5, .5)), ax.transData)
assert not polygon.get_path().contains_point(
ax.transData.transform((.5, .5)), ax.transData)
assert not polygon.get_path().contains_point(
ax.transData.transform((50, .5)), ax.transData)
@image_comparison(['arrow_contains_point.png'],
remove_text=True, style='mpl20')
def test_arrow_contains_point():
# fix bug (#8384)
fig, ax = plt.subplots()
ax.set_xlim((0, 2))
ax.set_ylim((0, 2))
# create an arrow with Curve style
arrow = patches.FancyArrowPatch((0.5, 0.25), (1.5, 0.75),
arrowstyle='->',
mutation_scale=40)
ax.add_patch(arrow)
# create an arrow with Bracket style
arrow1 = patches.FancyArrowPatch((0.5, 1), (1.5, 1.25),
arrowstyle=']-[',
mutation_scale=40)
ax.add_patch(arrow1)
# create an arrow with other arrow style
arrow2 = patches.FancyArrowPatch((0.5, 1.5), (1.5, 1.75),
arrowstyle='fancy',
fill=False,
mutation_scale=40)
ax.add_patch(arrow2)
patches_list = [arrow, arrow1, arrow2]
# generate some points
X, Y = np.meshgrid(np.arange(0, 2, 0.1),
np.arange(0, 2, 0.1))
for k, (x, y) in enumerate(zip(X.ravel(), Y.ravel())):
xdisp, ydisp = ax.transData.transform([x, y])
event = MouseEvent('button_press_event', fig.canvas, xdisp, ydisp)
for m, patch in enumerate(patches_list):
# set the points to red only if the arrow contains the point
inside, res = patch.contains(event)
if inside:
ax.scatter(x, y, s=5, c="r")
@image_comparison(['path_clipping.svg'], remove_text=True)
def test_path_clipping():
fig = plt.figure(figsize=(6.0, 6.2))
for i, xy in enumerate([
[(200, 200), (200, 350), (400, 350), (400, 200)],
[(200, 200), (200, 350), (400, 350), (400, 100)],
[(200, 100), (200, 350), (400, 350), (400, 100)],
[(200, 100), (200, 415), (400, 350), (400, 100)],
[(200, 100), (200, 415), (400, 415), (400, 100)],
[(200, 415), (400, 415), (400, 100), (200, 100)],
[(400, 415), (400, 100), (200, 100), (200, 415)]]):
ax = fig.add_subplot(4, 2, i+1)
bbox = [0, 140, 640, 260]
ax.set_xlim(bbox[0], bbox[0] + bbox[2])
ax.set_ylim(bbox[1], bbox[1] + bbox[3])
ax.add_patch(Polygon(
xy, facecolor='none', edgecolor='red', closed=True))
@image_comparison(['semi_log_with_zero.png'], style='mpl20')
def test_log_transform_with_zero():
x = np.arange(-10, 10)
y = (1.0 - 1.0/(x**2+1))**20
fig, ax = plt.subplots()
ax.semilogy(x, y, "-o", lw=15, markeredgecolor='k')
ax.set_ylim(1e-7, 1)
ax.grid(True)
def test_make_compound_path_empty():
# We should be able to make a compound path with no arguments.
# This makes it easier to write generic path based code.
r = Path.make_compound_path()
assert r.vertices.shape == (0, 2)
def test_make_compound_path_stops():
zero = [0, 0]
paths = 3*[Path([zero, zero], [Path.MOVETO, Path.STOP])]
compound_path = Path.make_compound_path(*paths)
# the choice to not preserve the terminal STOP is arbitrary, but
# documented, so we test that it is in fact respected here
assert np.sum(compound_path.codes == Path.STOP) == 0
@image_comparison(['xkcd.png'], remove_text=True)
def test_xkcd():
np.random.seed(0)
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)
with plt.xkcd():
fig, ax = plt.subplots()
ax.plot(x, y)
@image_comparison(['xkcd_marker.png'], remove_text=True)
def test_xkcd_marker():
np.random.seed(0)
x = np.linspace(0, 5, 8)
y1 = x
y2 = 5 - x
y3 = 2.5 * np.ones(8)
with plt.xkcd():
fig, ax = plt.subplots()
ax.plot(x, y1, '+', ms=10)
ax.plot(x, y2, 'o', ms=10)
ax.plot(x, y3, '^', ms=10)
@image_comparison(['marker_paths.pdf'], remove_text=True)
def test_marker_paths_pdf():
N = 7
plt.errorbar(np.arange(N),
np.ones(N) + 4,
np.ones(N))
plt.xlim(-1, N)
plt.ylim(-1, 7)
@image_comparison(['nan_path'], style='default', remove_text=True,
extensions=['pdf', 'svg', 'eps', 'png'])
def test_nan_isolated_points():
y0 = [0, np.nan, 2, np.nan, 4, 5, 6]
y1 = [np.nan, 7, np.nan, 9, 10, np.nan, 12]
fig, ax = plt.subplots()
ax.plot(y0, '-o')
ax.plot(y1, '-o')
def test_path_no_doubled_point_in_to_polygon():
hand = np.array(
[[1.64516129, 1.16145833],
[1.64516129, 1.59375],
[1.35080645, 1.921875],
[1.375, 2.18229167],
[1.68548387, 1.9375],
[1.60887097, 2.55208333],
[1.68548387, 2.69791667],
[1.76209677, 2.56770833],
[1.83064516, 1.97395833],
[1.89516129, 2.75],
[1.9516129, 2.84895833],
[2.01209677, 2.76041667],
[1.99193548, 1.99479167],
[2.11290323, 2.63020833],
[2.2016129, 2.734375],
[2.25403226, 2.60416667],
[2.14919355, 1.953125],
[2.30645161, 2.36979167],
[2.39112903, 2.36979167],
[2.41532258, 2.1875],
[2.1733871, 1.703125],
[2.07782258, 1.16666667]])
(r0, c0, r1, c1) = (1.0, 1.5, 2.1, 2.5)
poly = Path(np.vstack((hand[:, 1], hand[:, 0])).T, closed=True)
clip_rect = transforms.Bbox([[r0, c0], [r1, c1]])
poly_clipped = poly.clip_to_bbox(clip_rect).to_polygons()[0]
assert np.all(poly_clipped[-2] != poly_clipped[-1])
assert np.all(poly_clipped[-1] == poly_clipped[0])
def test_path_to_polygons():
data = [[10, 10], [20, 20]]
p = Path(data)
assert_array_equal(p.to_polygons(width=40, height=40), [])
assert_array_equal(p.to_polygons(width=40, height=40, closed_only=False),
[data])
assert_array_equal(p.to_polygons(), [])
assert_array_equal(p.to_polygons(closed_only=False), [data])
data = [[10, 10], [20, 20], [30, 30]]
closed_data = [[10, 10], [20, 20], [30, 30], [10, 10]]
p = Path(data)
assert_array_equal(p.to_polygons(width=40, height=40), [closed_data])
assert_array_equal(p.to_polygons(width=40, height=40, closed_only=False),
[data])
assert_array_equal(p.to_polygons(), [closed_data])
assert_array_equal(p.to_polygons(closed_only=False), [data])
def test_path_deepcopy():
# Should not raise any error
verts = [[0, 0], [1, 1]]
codes = [Path.MOVETO, Path.LINETO]
path1 = Path(verts)
path2 = Path(verts, codes)
copy.deepcopy(path1)
copy.deepcopy(path2)
@pytest.mark.parametrize('phi', np.concatenate([
np.array([0, 15, 30, 45, 60, 75, 90, 105, 120, 135]) + delta
for delta in [-1, 0, 1]]))
def test_path_intersect_path(phi):
# test for the range of intersection angles
eps_array = [1e-5, 1e-8, 1e-10, 1e-12]
transform = transforms.Affine2D().rotate(np.deg2rad(phi))
# a and b intersect at angle phi
a = Path([(-2, 0), (2, 0)])
b = transform.transform_path(a)
assert a.intersects_path(b) and b.intersects_path(a)
# a and b touch at angle phi at (0, 0)
a = Path([(0, 0), (2, 0)])
b = transform.transform_path(a)
assert a.intersects_path(b) and b.intersects_path(a)
# a and b are orthogonal and intersect at (0, 3)
a = transform.transform_path(Path([(0, 1), (0, 3)]))
b = transform.transform_path(Path([(1, 3), (0, 3)]))
assert a.intersects_path(b) and b.intersects_path(a)
# a and b are collinear and intersect at (0, 3)
a = transform.transform_path(Path([(0, 1), (0, 3)]))
b = transform.transform_path(Path([(0, 5), (0, 3)]))
assert a.intersects_path(b) and b.intersects_path(a)
# self-intersect
assert a.intersects_path(a)
# a contains b
a = transform.transform_path(Path([(0, 0), (5, 5)]))
b = transform.transform_path(Path([(1, 1), (3, 3)]))
assert a.intersects_path(b) and b.intersects_path(a)
# a and b are collinear but do not intersect
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(3, 0), (3, 3)]))
assert not a.intersects_path(b) and not b.intersects_path(a)
# a and b are on the same line but do not intersect
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0, 6), (0, 7)]))
assert not a.intersects_path(b) and not b.intersects_path(a)
# Note: 1e-13 is the absolute tolerance error used for
# `isclose` function from src/_path.h
# a and b are parallel but do not touch
for eps in eps_array:
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0 + eps, 1), (0 + eps, 5)]))
assert not a.intersects_path(b) and not b.intersects_path(a)
# a and b are on the same line but do not intersect (really close)
for eps in eps_array:
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0, 5 + eps), (0, 7)]))
assert not a.intersects_path(b) and not b.intersects_path(a)
# a and b are on the same line and intersect (really close)
for eps in eps_array:
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0, 5 - eps), (0, 7)]))
assert a.intersects_path(b) and b.intersects_path(a)
# b is the same as a but with an extra point
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0, 1), (0, 2), (0, 5)]))
assert a.intersects_path(b) and b.intersects_path(a)
@pytest.mark.parametrize('offset', range(-720, 361, 45))
def test_full_arc(offset):
low = offset
high = 360 + offset
path = Path.arc(low, high)
mins = np.min(path.vertices, axis=0)
maxs = np.max(path.vertices, axis=0)
np.testing.assert_allclose(mins, -1)
np.testing.assert_allclose(maxs, 1)
def test_disjoint_zero_length_segment():
this_path = Path(
np.array([
[824.85064295, 2056.26489203],
[861.69033931, 2041.00539016],
[868.57864109, 2057.63522175],
[831.73894473, 2072.89472361],
[824.85064295, 2056.26489203]]),
np.array([1, 2, 2, 2, 79], dtype=Path.code_type))
outline_path = Path(
np.array([
[859.91051028, 2165.38461538],
[859.06772495, 2149.30331334],
[859.06772495, 2181.46591743],
[859.91051028, 2165.38461538],
[859.91051028, 2165.38461538]]),
np.array([1, 2, 2, 2, 2],
dtype=Path.code_type))
assert not outline_path.intersects_path(this_path)
assert not this_path.intersects_path(outline_path)
def test_intersect_zero_length_segment():
this_path = Path(
np.array([
[0, 0],
[1, 1],
]))
outline_path = Path(
np.array([
[1, 0],
[.5, .5],
[.5, .5],
[0, 1],
]))
assert outline_path.intersects_path(this_path)
assert this_path.intersects_path(outline_path)
| [
"[email protected]"
] | |
8dba6fce8a430d0d0d0d1774d6d1c3870202a011 | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/ConstrainsAnalysisPDP1P2_20210712121606.py | 8efc27b4e0769bda9c34099e4e37ee69fe7cbe2b | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,675 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
"""
The unit use is IS standard
"""
class ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun:
"""This is a power-based master constraints analysis"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.5, number_of_motor=12, C_DR=0):
"""
:param beta: weight fraction
:param Hp: P_motor/P_total
:param n: number of motor
:param K1: drag polar coefficient for 2nd order term
:param K2: drag polar coefficient for 1st order term
:param C_D0: the drag coefficient at zero lift
:param C_DR: additional drag caused, for example, by external stores,
braking parachutes or flaps, or temporary external hardware
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.beta = beta
self.hp = 1 - Hp
self.n = number_of_motor
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
self.cdr = C_DR
self.w_s = wing_load
self.g0 = 9.80665
self.coefficient = self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
self.cl = self.beta * self.w_s / self.q
# print(self.cl)
self.delta_cl = pd.delta_lift_coefficient(self.cl)
self.delta_cd0 = pd.delta_CD_0()
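    # For reference (a restatement of the method below, not new behavior):
    # the master equation is Mattingly's constraint equation in power form,
    #   P_SL / W_TO = (beta * V / alpha)
    #                 * [q * C_D / (beta * W_TO/S) + (1/V) * dh/dt + (1/g0) * dV/dt]
    # where the lift and drag coefficients include the distributed-propulsion
    # increments delta_cl and delta_cd0 computed above.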
def master_equation(self, n, dh_dt, dV_dt):
cl = self.cl * n + self.delta_cl
cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
p_w = self.coefficient * \
(self.q / (self.beta * self.w_s) *
cd + dh_dt / self.v + dV_dt / self.g0)
return p_w
def cruise(self):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
self, n=1, dh_dt=0, dV_dt=0)
return p_w
def climb(self, roc):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 300 knots, which is about 150 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180)
* v / self.g0) ** 2) ** 0.5
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
self, n=load_factor, dh_dt=0, dV_dt=0)
return p_w
def take_off(self):
"""
A320neo take-off speed is about 150 knots, which is about 75 m/s
required runway length is about 2000 m
K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
"""
Cl_max_to = 2.3 # 2.3
K_TO = 1.2 # V_TO / V_stall
s_G = 1266
p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
3 / 2)
return p_w
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * \
(Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * \
(Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
def service_ceiling(self, roc=0.5):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
allFuncs = [take_off, stall_speed, cruise,
service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Mattingly_Method_with_DP_electric:
"""This is a power-based master constraints analysis
the difference between turbofun and electric for constrains analysis:
1. assume the thrust_lapse = 1 for electric propution
2. hp = 1 - hp_turbofun
"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.5, number_of_motor=12, C_DR=0):
"""
:param beta: weight fraction
:param Hp: P_motor/P_total
:param n: number of motor
:param K1: drag polar coefficient for 2nd order term
:param K2: drag polar coefficient for 1st order term
:param C_D0: the drag coefficient at zero lift
:param C_DR: additional drag caused, for example, by external stores,
braking parachutes or flaps, or temporary external hardware
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.beta = beta
        self.hp = Hp  # this differs from the turbofan variant
self.n = number_of_motor
# power lapse ratio
        self.alpha = 1  # this differs from the turbofan variant
self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
self.cdr = C_DR
self.w_s = wing_load
self.g0 = 9.80665
self.coefficient = self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
self.cl = self.beta * self.w_s / self.q
# print(self.cl)
self.delta_cl = pd.delta_lift_coefficient(self.cl)
self.delta_cd0 = pd.delta_CD_0()
def master_equation(self, n, dh_dt, dV_dt):
cl = self.cl * n + self.delta_cl
cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
p_w = self.coefficient * \
(self.q / (self.beta * self.w_s) *
cd + dh_dt / self.v + dV_dt / self.g0)
return p_w
def cruise(self):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=1, dh_dt=0, dV_dt=0)
return p_w
def climb(self, roc):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 300 knots, which is about 150 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180)
* v / self.g0) ** 2) ** 0.5
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=load_factor, dh_dt=0, dV_dt=0)
return p_w
def take_off(self):
"""
A320neo take-off speed is about 150 knots, which is about 75 m/s
required runway length is about 2000 m
K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
"""
Cl_max_to = 2.3 # 2.3
K_TO = 1.2 # V_TO / V_stall
s_G = 1266
p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
3 / 2)
return p_w
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * \
(Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * \
(Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
def service_ceiling(self, roc=0.5):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
allFuncs = [take_off, stall_speed, cruise,
service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun:
"""This is a power-based master constraints analysis based on Gudmundsson_method"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.5, number_of_motor=12, e=0.75, AR=10.3):
"""
:param beta: weight fraction
:param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
:param AR: wing aspect ratio, normally between 7 and 10
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.w_s = wing_load
self.g0 = 9.80665
self.hp = 1 - Hp
self.n = number_of_motor
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
h = 2.43 # height of winglets
b = 35.8
# equation 9-88, If the wing has winglets the aspect ratio should be corrected
ar_corr = AR * (1 + 1.9 * h / b)
self.k = 1 / (np.pi * ar_corr * e)
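        # Worked numbers (derived from the defaults already in this file,
        # added for clarity): AR = 10.3, h = 2.43, b = 35.8 give
        #   ar_corr = 10.3 * (1 + 1.9 * 2.43 / 35.8) ~= 11.63
        #   k = 1 / (pi * 11.63 * 0.75) ~= 0.0365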
self.coefficient = self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
cl = self.beta * self.w_s / self.q
self.delta_cl = pd.delta_lift_coefficient(cl)
self.delta_cd0 = pd.delta_CD_0()
# TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
cd_min = 0.02
cd_to = 0.03
cl_to = 0.8
self.v_to = 68
self.s_g = 1480
self.mu = 0.04
self.cd_min = cd_min + self.delta_cd0
self.cl = cl + self.delta_cl
self.cd_to = cd_to + self.delta_cd0
self.cl_to = cl_to + self.delta_cl
def cruise(self):
p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
return p_w * self.coefficient
def climb(self, roc):
p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
return p_w * self.coefficient
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 100 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180)
* v / self.g0) ** 2) ** 0.5
q = 0.5 * self.rho * v ** 2
p_w = q / self.w_s * (self.cd_min + self.k *
(load_factor / q * self.w_s + self.delta_cl) ** 2)
return p_w * self.coefficient
def take_off(self):
q = self.q / 2
p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
1 - q * self.cl_to / self.w_s)
return p_w * self.coefficient
def service_ceiling(self, roc=0.5):
vy = (2 / self.rho * self.w_s *
(self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
q = 0.5 * self.rho * vy ** 2
p_w = roc / vy + q / self.w_s * \
(self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
# p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
# self.k * self.cd_min / 3) ** 0.5
return p_w * self.coefficient
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * \
(Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * \
(Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
allFuncs = [take_off, stall_speed, cruise,
service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric:
"""This is a power-based master constraints analysis based on Gudmundsson_method
the difference between turbofun and electric for constrains analysis:
1. assume the thrust_lapse = 1 for electric propution
2. hp = 1 - hp_turbofun
"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.5, number_of_motor=12, e=0.75, AR=10.3):
"""
:param beta: weight fraction
:param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
:param AR: wing aspect ratio, normally between 7 and 10
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.w_s = wing_load
self.g0 = 9.80665
        self.hp = Hp  # this differs from the turbofan variant
self.n = number_of_motor
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
# power lapse ratio
        self.alpha = 1  # this differs from the turbofan variant
h = 2.43 # height of winglets
b = 35.8
# equation 9-88, If the wing has winglets the aspect ratio should be corrected
ar_corr = AR * (1 + 1.9 * h / b)
self.k = 1 / (np.pi * ar_corr * e)
self.coefficient = self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
cl = self.beta * self.w_s / self.q
self.delta_cl = pd.delta_lift_coefficient(cl)
self.delta_cd0 = pd.delta_CD_0()
# TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
cd_min = 0.02
cd_to = 0.03
cl_to = 0.8
self.v_to = 68
self.s_g = 1480
self.mu = 0.04
self.cd_min = cd_min + self.delta_cd0
self.cl = cl + self.delta_cl
self.cd_to = cd_to + self.delta_cd0
self.cl_to = cl_to + self.delta_cl
def cruise(self):
p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
return p_w * self.coefficient
def climb(self, roc):
p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
return p_w * self.coefficient
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 100 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180)
* v / self.g0) ** 2) ** 0.5
q = 0.5 * self.rho * v ** 2
p_w = q / self.w_s * (self.cd_min + self.k *
(load_factor / q * self.w_s + self.delta_cl) ** 2)
return p_w * self.coefficient
def take_off(self):
q = self.q / 2
p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
1 - q * self.cl_to / self.w_s)
return p_w * self.coefficient
def service_ceiling(self, roc=0.5):
vy = (2 / self.rho * self.w_s *
(self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
q = 0.5 * self.rho * vy ** 2
p_w = roc / vy + q / self.w_s * \
(self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
# p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
# self.k * self.cd_min / 3) ** 0.5
return p_w * self.coefficient
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * \
(Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * \
(Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
allFuncs = [take_off, stall_speed, cruise,
service_ceiling, level_turn, climb]
if __name__ == "__main__":
n = 100
w_s = np.linspace(100, 9000, n)
constrains_name = ['take off', 'stall speed', 'cruise', 'service ceiling', 'level turn @3000m',
'climb @S-L', 'climb @3000m', 'climb @7000m']
constrains = np.array([[0, 68, 0.988, 0.5], [0, 80, 1, 0.5], [11300, 230, 0.948, 0.8],
[11900, 230, 0.78, 0.5], [3000, 100, 0.984, 0.8], [0, 100, 0.984, 0.5],
[3000, 200, 0.975, 0.6], [7000, 230, 0.96, 0.8]])
color = ['c', 'k', 'b', 'g', 'y', 'plum', 'violet', 'm']
label = ['feasible region with PD', 'feasible region with PD', 'feasible region Gudmundsson',
'feasible region without PD', 'feasible region without PD', 'feasible region Mattingly']
methods = [ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
ConstrainsAnalysis_Mattingly_Method_with_DP_electric,
ca.ConstrainsAnalysis_Mattingly_Method,
ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun,
ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric,
ca.ConstrainsAnalysis_Gudmundsson_Method]
m = constrains.shape[0]
    n_methods = len(methods)
    p_w = np.zeros([m, n, n_methods])

    # plots: one subplot per method (6 methods -> 3 x 2 grid)
    fig, ax = plt.subplots(3, 2, sharex=True, sharey=True, figsize=(20, 20))
    ax = ax.flatten()

    for k in range(n_methods):
        for i in range(m):
            for j in range(n):
                h = constrains[i, 0]
                v = constrains[i, 1]
                beta = constrains[i, 2]
                hp = constrains[i, 3]

                # methods 0, 1, 3 and 4 are the distributed-propulsion
                # variants and take the power split Hp; methods 2 and 5
                # are the baseline models without it
                if k in (0, 1, 3, 4):
                    problem = methods[k](h, v, beta, w_s[j], hp)
                else:
                    problem = methods[k](h, v, beta, w_s[j])

                if i >= 5:
                    p_w[i, j, k] = problem.allFuncs[-1](
                        problem, roc=15 - 5 * (i - 5))
                else:
                    p_w[i, j, k] = problem.allFuncs[i](problem)

            if i == 1:
                # stall speed is a wing-load limit, so draw it as a
                # vertical line instead of a power-load curve
                ax[k].plot(p_w[i, :, k], np.linspace(0, 250, n),
                           color=color[i], label=constrains_name[i])
            else:
                ax[k].plot(w_s, p_w[i, :, k],
                           color=color[i], label=constrains_name[i])

        # map the stall-speed limit onto a steep line (or a step when it is
        # constant) so that np.amax can combine it with the power-load
        # constraints when shading the feasible region
        span = p_w[1, -1, k] - p_w[1, 20, k]
        if span != 0:
            p_w[1, :, k] = 200 / span * (w_s - p_w[1, 2, k])
        else:
            p_w[1, :, k] = np.where(w_s < p_w[1, 2, k], 0.0, 200.0)
        ax[k].fill_between(w_s, np.amax(p_w[0:m - 1, :, k], axis=0), 200,
                           color='b', alpha=0.25, label=label[k])
        ax[k].set_xlabel('Wing Load: $W_{TO}$/S (N/${m^2}$)')
        ax[k].set_ylabel('Power-to-Load: $P_{SL}$/$W_{TO}$ (W/N)')
        ax[k].legend(bbox_to_anchor=(1.002, 1), loc="upper left")
        ax[k].set_xlim(100, 9000)
        ax[k].set_ylim(0, 200)
        ax[k].grid()
    plt.tight_layout()
    plt.show()
| [
"[email protected]"
] | |
4c56aadf4698771dcbc22a690ca36a0bc8fb8628 | 6ac0aeea8229c4e2c7a041e85c3afeeb106c6b01 | /use_kapl_util.py | 38117a5ea53acf4d7f2d27eef75d14b2812f312f | [] | no_license | waiteb15/py3intro | 325dafaaa642052280d6c050eacf8b406b40e01d | 68b30f147e7408220490a46d3e595acd60513e9e | refs/heads/master | 2020-03-27T10:50:25.928836 | 2019-02-28T21:47:11 | 2019-02-28T21:47:11 | 146,448,412 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/env python
import KAPL_UTIL
KAPL_UTIL.spam()
KAPL_UTIL.ham()
# Don't do this, it's supposed to be private... ssshhhh!
#KAPL_UTIL._eggs()
| [
"[email protected]"
] | |
3ba9c9bca8655ce76da6ea3578d0b108e258fc72 | 733e6b62740c35968a0c554e64642f9ebad53bc4 | /data/Scripts/Tts.py | df7fbacaf12f30dec1ad33a28c9c58b1425a7bc9 | [] | no_license | ten0s/autokey | 5d492013aed1ad9812fe58e179b2793b5b5865fe | 875e69c115e7cb4dc276e2acd2601fd96b669d7c | refs/heads/master | 2023-02-10T09:51:20.880320 | 2023-02-01T09:31:35 | 2023-02-01T09:31:35 | 3,323,213 | 12 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | system.exec_command("tts.sh", False) | [
"="
] | = |
125feb3e297701f0b402bc58baa66f53b7b43a05 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03427/s426447268.py | 447a51a40727345bcb9f33c63d6d42f8cb865b83 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | N = list(input())
num = len(N)
# Maximum digit sum over 1..N (AtCoder p03427).
if N.count('9') == num:
    # N is all nines, so N itself maximizes the digit sum.
    print(9*num)
elif N.count('9') == num-1 and N[0] != '9':
    # Every digit except the leading one is 9, so N itself is optimal.
    print(int(N[0])+9*(num-1))
else:
    if num <= 1:
        print(''.join(N))
    else:
        # Otherwise decrement the leading digit and fill the rest with 9s,
        # e.g. 393 -> 299 (digit sum 20).
        ans = int(N[0])-1
        ans += 9*(num-1)
        print(ans)
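# Quick sanity checks (illustrative, not part of the judge's I/O):
#   N = "100" -> 18 (achieved by 99)
#   N = "919" -> 26 (achieved by 899)
#   N = "9"   -> 9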
"[email protected]"
] | |
e7d145c6fcadca01a666a56be951e42f0637c3d7 | aea7b59d10a72ccac35a06e6d31a508771efc5a0 | /Day 26/AppendAndDelete.py | e2135a134841178d356dbe8bc4a59ede08a84ed8 | [] | no_license | divyatejakotteti/100DaysOfCode | d52871b27146cd1b1ef2b997e93b9a96ca8ac5a9 | 3c8555b021482565f56d7fb86fa5dacb304dfd3c | refs/heads/master | 2023-02-01T10:48:19.082268 | 2020-12-21T06:34:02 | 2020-12-21T06:34:02 | 294,888,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | '''
You have a string of lowercase English alphabetic letters. You can perform two types of operations on the string:
Append a lowercase English alphabetic letter to the end of the string.
Delete the last character in the string. Performing this operation on an empty string results in an empty string.
Given an integer, k, and two strings, s and t, determine whether or not you can convert s to t by performing exactly k of the above operations on s. If it's possible, print Yes. Otherwise, print No.
Function Description
Complete the appendAndDelete function in the editor below. It should return a string, either Yes or No.
appendAndDelete has the following parameter(s):
s: the initial string
t: the desired string
k: an integer that represents the number of operations
Input Format
The first line contains a string s, the initial string.
The second line contains a string t, the desired final string.
The third line contains an integer k, the number of operations.
Constraints
s and t consist of lowercase English alphabetic letters.
Output Format
Print Yes if you can obtain string t by performing exactly k operations on s. Otherwise, print No.
Sample Input 0
hackerhappy
hackerrank
9
Sample Output 0
Yes
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the appendAndDelete function below.
def appendAndDelete(s, t, k):
count=0
for i,j in zip(s,t):
if(i==j):
count+=1
else:
break
t_len=len(s)-len(t)
if(t_len<=2*count+k and t_len%2==k%2 or t_len<k):
return "Yes"
else:
return "No"
if __name__ == '__main__':
s = input()
t = input()
k = int(input())
result = appendAndDelete(s, t, k)
| [
"[email protected]"
] | |
5a0ae2d19148d1aad148a2440476b4e73528714c | 1df8feb285b8b5bce52ae545005d6b753ddebc1c | /trix/trix_core/fabfile.py | 547d59ba67134a0b6f5ac3d88cf172a2b6498724 | [
"BSD-3-Clause"
] | permissive | msabramo/trix2 | de5080b0752b98ea3d147d779a052a32041bd8b8 | bb41e676be8b3a4ae20948db70f4c4c5ab0283a0 | refs/heads/master | 2023-07-06T16:42:12.304879 | 2015-01-08T11:44:11 | 2015-01-08T11:44:11 | 28,970,420 | 0 | 0 | null | 2015-01-08T14:49:44 | 2015-01-08T14:49:43 | CSS | UTF-8 | Python | true | false | 52 | py | from trix.project.develop.fabrictasks.i18n import *
| [
"[email protected]"
] | |
de0396149e70d39521730b0fbb62dbbccd8ee1ee | 0193e4024c8236db023558d70233f988e55b5a21 | /sdk/python/tekton_pipeline/models/v1beta1_pipeline.py | 353baffa0a352f493e12c828334ae2d4be61bd28 | [
"Apache-2.0"
] | permissive | tektoncd/experimental | 3ae1202cab489b3ba631dfc15223b13ac9215ed1 | ee13de632e126a5595944d6303bae36cad4555b7 | refs/heads/main | 2023-08-31T16:57:09.444985 | 2023-06-22T15:59:01 | 2023-07-03T11:22:17 | 180,445,039 | 101 | 138 | Apache-2.0 | 2023-09-14T11:41:30 | 2019-04-09T20:32:54 | Python | UTF-8 | Python | false | false | 7,084 | py | # Copyright 2021 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Tekton
Tekton Pipeline # noqa: E501
The version of the OpenAPI document: v0.17.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from tekton_pipeline.configuration import Configuration
class V1beta1Pipeline(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1PipelineSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1beta1Pipeline - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
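    # Illustrative construction (a sketch; the field values below are
    # placeholders, not taken from this file):
    #   p = V1beta1Pipeline(api_version='tekton.dev/v1beta1', kind='Pipeline')
    #   p.to_dict()  # -> {'api_version': 'tekton.dev/v1beta1', 'kind': 'Pipeline', ...}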
@property
def api_version(self):
"""Gets the api_version of this V1beta1Pipeline. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1Pipeline. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1Pipeline.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1Pipeline. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1beta1Pipeline. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1Pipeline. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1Pipeline.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1Pipeline. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1Pipeline. # noqa: E501
:return: The metadata of this V1beta1Pipeline. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1Pipeline.
:param metadata: The metadata of this V1beta1Pipeline. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1beta1Pipeline. # noqa: E501
:return: The spec of this V1beta1Pipeline. # noqa: E501
:rtype: V1beta1PipelineSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1beta1Pipeline.
:param spec: The spec of this V1beta1Pipeline. # noqa: E501
:type: V1beta1PipelineSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1Pipeline):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1Pipeline):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
f28c701167115edacd2d05a27a9cbfd877a7ad29 | c41d5fae2358d68c6e25724e78aed37e5c5bc9ef | /eemeter/modeling/split.py | 5b98a7424826ed14cf05027198374b1d64659f9c | [
"MIT"
] | permissive | hj91/eemeter | 8087bbf9ceac1e93952914e4afc01c658a355893 | 9d2163a3911f5655b155caffb8b6c7e8653483c3 | refs/heads/master | 2021-07-10T08:23:40.137087 | 2017-10-09T18:57:57 | 2017-10-09T18:57:57 | 106,719,500 | 1 | 0 | null | 2017-10-12T16:42:22 | 2017-10-12T16:42:22 | null | UTF-8 | Python | false | false | 8,538 | py | import logging
import traceback
import numpy as np
import eemeter.modeling.exceptions as model_exceptions
from eemeter.structures import EnergyTrace
logger = logging.getLogger(__name__)
class SplitModeledEnergyTrace(object):
''' Light wrapper around models applicable to a single trace which
fits and predicts multiple models for different segments.
Parameters
----------
trace : eemeter.structures.EnergyTrace
Trace to be modeled.
formatter : eemeter.modeling.formatter.Formatter
Formatter to prep trace data for modeling.
model_mapping : dict
Items of this dictionary map `modeling_period_label` s to models
modeling_period_set : eemeter.structures.ModelingPeriodSet
The set of modeling periods over which models should be applicable.
'''
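    # Typical flow (a sketch; the trace/formatter/model/weather objects are
    # assumed to be built elsewhere in the eemeter package):
    #   smet = SplitModeledEnergyTrace(trace, formatter,
    #                                  {'baseline': model_b, 'reporting': model_r},
    #                                  modeling_period_set)
    #   outputs = smet.fit(weather_source)
    #   smet.predict('baseline', demand_fixture_data)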
def __init__(self, trace, formatter, model_mapping, modeling_period_set):
self.trace = trace
self.formatter = formatter
self.model_mapping = model_mapping
self.modeling_period_set = modeling_period_set
self.fit_outputs = {}
def __repr__(self):
return (
"SplitModeledEnergyTrace(trace={}, formatter={},"
" model_mapping={}, modeling_period_set={})"
.format(self.trace, self.formatter, self.model_mapping,
self.modeling_period_set)
)
def fit(self, weather_source):
''' Fit all models associated with this trace.
Parameters
----------
weather_source : eemeter.weather.ISDWeatherSource
Weather source to use in creating covariate data.
'''
for modeling_period_label, modeling_period in \
self.modeling_period_set.iter_modeling_periods():
filtered_data = self._filter_by_modeling_period(
self.trace, modeling_period)
filtered_trace = EnergyTrace(
self.trace.interpretation, data=filtered_data,
unit=self.trace.unit)
model = self.model_mapping[modeling_period_label]
outputs = {
"status": None,
"traceback": None,
"input_data": None,
"start_date": None,
"end_date": None,
"n_rows": None,
"model_fit": {},
}
# fail with DataSufficiencyException if bad weather source
if weather_source is None:
message = (
'No weather source found for trace {} in {} period'
.format(self.trace.trace_id, modeling_period_label)
)
logger.warn(message)
try:
raise model_exceptions.DataSufficiencyException(message)
except:
outputs.update({
"status": "FAILURE",
"traceback": traceback.format_exc(),
})
self.fit_outputs[modeling_period_label] = outputs
continue
# attempt to create input data
try:
input_data = self.formatter.create_input(
filtered_trace, weather_source)
except:
logger.warn(
'Input data formatting failed for trace {} in {} period.'
.format(self.trace.trace_id, modeling_period_label)
)
outputs.update({
"status": "FAILURE",
"traceback": traceback.format_exc(),
})
else:
input_description = self.formatter.describe_input(input_data)
input_serialization = self.formatter.serialize_input(
input_data)
input_mask = self.formatter.get_input_data_mask(
input_data)
outputs.update({
"input_data_serialization": input_serialization,
"input_mask": input_mask, # missing days
"start_date": input_description.get('start_date'),
"end_date": input_description.get('end_date'),
"n_rows": input_description.get('n_rows'),
"trace": filtered_trace,
})
try:
model_fit = model.fit(input_data)
except:
tb = traceback.format_exc()
logger.warn(
'{} fit failed for trace {} in {} period.'
.format(model, self.trace.trace_id, modeling_period_label)
)
outputs.update({
"status": "FAILURE",
"traceback": tb,
})
else:
logger.debug(
'{} fit successful for trace {} in {} period.'
.format(model, self.trace.trace_id, modeling_period_label)
)
outputs["model_fit"].update(model_fit)
outputs.update({
"status": "SUCCESS",
})
self.fit_outputs[modeling_period_label] = outputs
return self.fit_outputs
def predict(self, modeling_period_label, demand_fixture_data, **kwargs):
''' Predict for any one of the modeling_periods associated with this
trace. Light wrapper around :code:`model.predict(` method.
Parameters
----------
modeling_period_label : str
Modeling period indicating which model to use in making the
prediction.
demand_fixture_data : object
Data (formatted by :code:`self.formatter`) over which prediction
should be made.
params : object, default None
Fitted parameters for the model. If :code:`None`, use parameters
found when :code:`.fit(` method was called.
'''
outputs = self.fit_outputs[modeling_period_label]
model = self.model_mapping[modeling_period_label]
if outputs["status"] == "FAILURE":
logger.warn(
'{} cannot predict in failed {} period for trace {}.'
.format(model, modeling_period_label, self.trace.trace_id)
)
return None
if 'params' not in kwargs:
kwargs['params'] = outputs["model_fit"]["model_params"]
return model.predict(demand_fixture_data, **kwargs)
def compute_derivative(self, modeling_period_label, derivative_callable,
derivative_callable_kwargs):
''' Compute a modeling derivative for this modeling period.
Parameters
----------
modeling_period_label : str
Label for modeling period for which derivative should be computed.
derivative_callable : callable
Callable which can be used as follows:
.. code-block: python
>>> derivative_callable(formatter, model, **kwargs)
derivative_callable_kwargs : dict
Arbitrary keyword arguments to be passed to the derviative callable
'''
outputs = self.fit_outputs[modeling_period_label]
if outputs["status"] == "FAILURE":
return None
model = self.model_mapping[modeling_period_label]
try:
derivative = derivative_callable(self.formatter, model,
**derivative_callable_kwargs)
except Exception:
logger.exception("Derivative computation failed.")
return None
return derivative
@staticmethod
def _filter_by_modeling_period(trace, modeling_period):
start = modeling_period.start_date
end = modeling_period.end_date
if start is None:
if end is None:
filtered_df = trace.data.copy()
else:
filtered_df = trace.data[:end].copy()
else:
if end is None:
filtered_df = trace.data[start:].copy()
else:
filtered_df = trace.data[start:end].copy()
# require NaN last data point as cap
if filtered_df.shape[0] > 0:
last_index = filtered_df.index[-1]
filtered_df.set_value(last_index, 'value', np.nan)
filtered_df.set_value(last_index, 'estimated', False)
return filtered_df
| [
"[email protected]"
] | |
bd1454cf6557e9e036d562f13f13398a70985319 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/pppservercapacity_979eb526d86ff91bb93b7632215d608e.py | ec4697519c54b2c4c12c1b34054c32ffd3a7d9c2 | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,068 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class PppServerCapacity(Base):
"""This object measures the PPPoX server session capacity of the LAC DUT.
The PppServerCapacity class encapsulates a list of pppServerCapacity resources that are managed by the user.
A list of resources can be retrieved from the server using the PppServerCapacity.find() method.
The list can be managed by using the PppServerCapacity.add() and PppServerCapacity.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'pppServerCapacity'
_SDM_ATT_MAP = {
'InputParameters': 'inputParameters',
'Mode': 'mode',
'Name': 'name',
}
_SDM_ENUM_MAP = {
'mode': ['existingMode', 'newMode'],
}
def __init__(self, parent, list_op=False):
super(PppServerCapacity, self).__init__(parent, list_op)
@property
def Results(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_cd98305315795d88eb1b6a5eaa45cd4b.Results): An instance of the Results class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_cd98305315795d88eb1b6a5eaa45cd4b import Results
if self._properties.get('Results', None) is not None:
return self._properties.get('Results')
else:
return Results(self)._select()
@property
def TestConfig(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_f1c858f32d12ca4b5bc8592aab77f4a6.TestConfig): An instance of the TestConfig class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_f1c858f32d12ca4b5bc8592aab77f4a6 import TestConfig
if self._properties.get('TestConfig', None) is not None:
return self._properties.get('TestConfig')
else:
return TestConfig(self)
@property
def InputParameters(self):
# type: () -> str
"""
Returns
-------
- str: Input Parameters
"""
return self._get_attribute(self._SDM_ATT_MAP['InputParameters'])
@InputParameters.setter
def InputParameters(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InputParameters'], value)
@property
def Mode(self):
# type: () -> str
"""
Returns
-------
- str(existingMode | newMode): Test mode
"""
return self._get_attribute(self._SDM_ATT_MAP['Mode'])
@Mode.setter
def Mode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Mode'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Test name
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
def update(self, InputParameters=None, Mode=None, Name=None):
# type: (str, str, str) -> PppServerCapacity
"""Updates pppServerCapacity resource on the server.
Args
----
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, InputParameters=None, Mode=None, Name=None):
# type: (str, str, str) -> PppServerCapacity
"""Adds a new pppServerCapacity resource on the server and adds it to the container.
Args
----
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with all currently retrieved pppServerCapacity resources using find and the newly added pppServerCapacity resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained pppServerCapacity resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, InputParameters=None, Mode=None, Name=None):
# type: (str, str, str) -> PppServerCapacity
"""Finds and retrieves pppServerCapacity resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve pppServerCapacity resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all pppServerCapacity resources from the server.
Args
----
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with matching pppServerCapacity resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of pppServerCapacity data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the pppServerCapacity resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Applies the specified Quick Test.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyAsync operation on the server.
applyAsync(async_operation=bool)
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the applyAsyncResult operation on the server.
applyAsyncResult(async_operation=bool)bool
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool:
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
applyITWizardConfiguration(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the generateReport operation on the server.
Generate a PDF report for the last succesfull test run.
generateReport(async_operation=bool)string
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: This method is asynchronous and has no return value.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
        The IxNetwork model allows for multiple method signatures with the same name while Python does not.
run(async_operation=bool)list
-----------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
run(InputParameters=string, async_operation=bool)list
-----------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Starts the specified Quick Test.
        The IxNetwork model allows for multiple method signatures with the same name while Python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(InputParameters=string, async_operation=bool)
---------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
waitForTest(async_operation=bool)list
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('waitForTest', payload=payload, response_object=None)
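    # --- Usage sketch (editorial addition, not part of the generated client) ---
    # A hedged example of how the QuickTest operations above would typically be
    # chained; `quicktest` stands for an instance of this class obtained from a
    # live session, which is assumed here rather than shown.
    #
    #     quicktest.Apply()                       # push the configuration
    #     quicktest.Start(InputParameters="...")  # begin execution
    #     results = quicktest.WaitForTest()       # block until the run finishes
    #     report = quicktest.GenerateReport()     # report for the last run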
| [
"[email protected]"
] | |
673c796757af9133dba1e31c837b91a67964f6db | 60e34c75afec810f4b1c2c82495d8d3017f32d33 | /02栈和队列/01Stack_list.py | 0150ee592e0a398ad418dd1bc8eed5a13afc24a8 | [] | no_license | ares5221/Data-Structures-and-Algorithms | af97c6b34b810c37f152af595846870a7b9b304b | 7c51eee0c375136f995cc063ffc60d33a520d748 | refs/heads/master | 2021-07-17T21:18:46.556958 | 2018-12-03T07:30:13 | 2018-12-03T07:30:13 | 144,227,642 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | # coding = utf-8
class Stack(object):
    '''A stack implemented on top of a Python list (sequential storage)'''
def __init__(self):
        '''Initialize the stack as an empty list'''
self.items = []
def isEmpty(self):
        '''Return True if the stack is empty'''
return self.items == []
def size(self):
        '''Return the number of items on the stack'''
return len(self.items)
def pop(self):
        '''Pop the top item off the stack'''
if len(self.items) == 0:
print("Stack is empty, you can not pop anything")
return
del self.items[len(self.items) - 1]
return
def push(self, num):
        '''Push an item onto the stack'''
self.items.append(num)
return
def peek(self):
        '''Return the item on top of the stack'''
if self.items == []:
print("Stack is empty, no peek")
return
return self.items[len(self.items) - 1]
def printStack(self):
        '''Print the stack contents'''
for k in self.items:
print(k, end=" ")
print("")
def delete(self):
        '''Destroy the stack'''
k = len(self.items)
i = 0
while k > 0:
del self.items[0]
k -= 1
del k
del i
print("delete Stack successfully!")
if '__main__' == __name__:
List = [1, 2, 3, 4, 5, 6]
l = Stack()
print("将List压入栈中:", end=" ")
for i in List:
l.push(i)
l.printStack()
print("栈是否为空:", end=" ")
print("空" if l.isEmpty() == True else "非空")
print("栈的大小为:%d" % l.size())
print("出栈:", end=" ")
l.pop()
l.printStack()
print("入栈(num=10):", end=" ")
l.push(10)
l.printStack()
print("栈顶元素为:%d" % l.peek())
l.delete()
| [
"[email protected]"
] | |
79247f284ea58a8aab0d7a2f936b3d0b229c43a0 | 141c5ef07df60b1c9f726e4605b78a2a7c1243e9 | /meross_iot/model/plugin/power.py | d9e4a85dc275ae2f69b7c4409a9f4aac0062d785 | [
"MIT"
] | permissive | albertogeniola/MerossIot | cd8abaac236a7fb442bdf9613c7e6760123c8bd3 | de1c22696511eee106961da3f22d3030ed9c254c | refs/heads/0.4.X.X | 2023-09-01T11:11:09.793153 | 2023-04-01T15:15:50 | 2023-04-01T15:15:50 | 146,365,723 | 467 | 102 | MIT | 2023-09-11T06:42:13 | 2018-08-27T23:30:56 | Python | UTF-8 | Python | false | false | 780 | py | from datetime import datetime
class PowerInfo(object):
def __init__(self, current_ampere: float, voltage_volts: float, power_watts: float, sample_timestamp: datetime):
self._current = current_ampere
self._voltage = voltage_volts
self._power = power_watts
self._sample_timestamp = sample_timestamp
@property
def power(self) -> float:
return self._power
@property
def voltage(self) -> float:
return self._voltage
@property
def current(self) -> float:
return self._current
@property
def sample_timestamp(self) -> datetime:
return self._sample_timestamp
def __str__(self):
return f"POWER = {self._power} W, VOLTAGE = {self._voltage} V, CURRENT = {self._current} A"
| [
"[email protected]"
] | |
9d40a317734b30fd9371baa7c53d54f329c4474e | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/binaryTree_20200623143621.py | f8964b2ad95245e959bc54fa0f8e60157601b933 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,215 | py | # define node class
class Node(object):
# constructor
def __init__(self,value):
self.value = value
self.left = None
self.right = None
# define binary tree class
class BinaryTree(object):
def __init__(self,root):
# converting data
self.root = Node(root)
    def print_tree(self, traversal_type):
        if traversal_type == "preorder":
            return self.preorder_print(self.root, "")
        elif traversal_type == "inorder":
            return self.inorder_print(self.root, "")
        elif traversal_type == "postorder":
            return self.postorder_print(self.root, "")
        else:
            print("Traversal type " + str(traversal_type) + " is not supported.")
            return False
# root -->left--->right(preorder)
def preorder_print(self,start,traversal):
if start:
traversal += (str(start.value) + "-")
# calling the function recursively
traversal = self.preorder_print(start.left,traversal)
traversal = self.preorder_print(start.right,traversal)
return traversal
# left - root -right
    def inorder_print(self, start, traversal):
        if start:
            traversal = self.inorder_print(start.left, traversal)
            traversal += (str(start.value) + "-")
            traversal = self.inorder_print(start.right, traversal)
        return traversal
# left ->right -> root
def postorder_print(self,start,traversal):
if start:
traversal = self.postorder_print(start.left,traversal)
traversal = self.postorder_print(start.right,traversal)
traversal +=(str(start.value) + "-" )
return traversal
# 1 is root
# creating left child
'''
        1
       / \
      2   3
     / \ / \
    4  5 6  7
'''
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.root.right.right = Node(7)
tree.root.right.left = Node(6)
# print(tree.print_tree("preorder"))
print(tree.print_tree("inorder"))
# print(tree.print_tree("postorder")) | [
"[email protected]"
] | |
e30693a3a31fad2a6436482b4dcaae11d3c8a9ef | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/295/92773/submittedfiles/testes.py | edca1d262cf945e495df921179da0f07739e2f99 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,041 | py | a = []
n = 4
for i in range(0,n,1):
a.append(input("Digite umvalor:"))
for i in range(0,n,1):
if a[n] %2 ==0:
print(a[n])
a = [2,4,6,8,10]
n = 2
for i in range (0,len(a),1):
print (a[i]//2)
print("----------------------------------------------------")
for i in range(1,11,1):
a.append(float(input("Digite o elemento :")))
print(a)
print(sum(a))
print(len(a))
del a[1]
print(a)
print(len(a))
for i in range(9 , -1, -1):
print(a[i])
print("----------------------------------------------------")
a = []
for i in range(1,11,1):
a.append(input("Digite o elemento :"))
print(a)
for i in range(9 , -1, -1):
print(a[i])
n = int(input("Digite o numero de notas :"))
while n<=1:
n = int(input("Digite o numero de notas :"))
notas = []
for i in range(0,n,1):
notas.append(float(input("Digite a nota%d[%d]: "% (i+1, i))))
media = 0
for i in range(0,n,1):
media += notas[i]/float(n)
print(notas)
print(media)
notas = []
for i in range(0,50,1):
notas.append(float(input("Digite a nota%d[%d]: "% (i+1, i))))
media = 0
for i in range(0,50,1):
media += notas[i]/50.0
print(notas)
print(media)
n = int(input("Digite um número inteiro nao negativo :"))
i = 1
cont = 1
while i<=n:
if n>0:
cont = cont*i
i = i + 1
print("%d! = %d" % (n,cont))
def primo(n):
contador = 0
for i in range(2,n,1):
if n%i == 0:
contador += 1
break
if contador == 0:
return True
else:
return False
print(primo(11))
def raiz(x,n):
resultado = x**(1/float(n))
return resultado
print(raiz(8,3))
n = int(input("Digite o valor de n :"))
i = 1
cont = 0
while i<n:
if i%2==1:
cont = cont + 1
i = i + 1
print(cont)
a = float(input("Digite um numero :"))
b = a/15
print("%.4f" % b)
a = int(input("Digite um numero :"))
if a%2==0:
print("par")
else:
print("impar")
a = int(input("Digite um numero a:"))
b = int(input("Digite um numero b:"))
if (a + b) > 10:
print(a)
if (a + b) > 10:
print(b)
a = float(input("Digite um numero :"))
if a > 20:
print(a)
p1 = float(input("Digite o peso 1 :"))
c1 = float(input("Digite o comprimento 1 :"))
p2 = float(input("Digite o peso 2 :"))
c2 = float(input("Digite o comprimento 2 :"))
if (p1*c1) == (p2*c2):
print("0")
elif (p1*c1) > (p2*c2):
print("-1")
else:
print("1")
p = float(input("Digite o peso: "))
h = float(input("Digite a altura: "))
imc = p/(h**2)
if imc<20:
print("ABAIXO")
#ENTRADA
a = int(input("que horas são? (0-23) "))
#PROCESSAMENTO E SAÍDA
if a >= 3 and a < 12:
print("BOM DIA")
elif a >= 12 and a < 18:
print("Boa tarde")
elif a < 3:
print("Boa noite")
elif a >= 18:
print("Boa noite")
else:
print("hora invalida")
"""if a < 0 or a > 23:
print("Hora invalida")
else:
if a > 3 and a < 12:
print("Bom dia")
elif a >= 12 and a < 18:
print("Boa tarde")
else:
print("Boa noite")"""
a= (5%2)!=0
print(a)
a = float(input("digite o ano:"))
b = float(input("é o mundial do palmeiras ou a cachaça? digite 1 ou 2 respectivamente:"))
c = float(input("se responder mundial ta falso"))
n1 = float(input("Digite n1:"))
n2 = float(input("Digite n2:"))
n3 = float(input("Digite n3:"))
total = (n1+n2+n3)
print(total)
a = (10//5)%3
print(a)
a = float(input("Digite a:"))
b = float(input("Digite b:"))
c = a+b/2
print(c)
a = 5.2
print("a=%.5f" % a)
unidade=float(input("digite uma medida em metros: "))
converte=(unidade*100)
print("o valor em cetimetros da unidade é %2.f" %converte)
nota1=float(input("digite nota 1: "))
print(nota1)
nota2=float(input("digite nota 2: "))
print(nota2)
nota3=float(input("digite nota 3: "))
print(nota3)
nota4=float(input("digite nota 4: "))
print(nota4)
media=((nota1+nota2+nota3+nota4)/4)
print("----------------------------")
print("a media do aluno eh %2.f" % media)
print("----------------------------")
| [
"[email protected]"
] | |
b2e92562247cf0977bc31277c4a90d20cf359b35 | cf39421d0948655f587445a484bf04fd1986a06f | /microsvc/subgraph.py | 11607d214b7cebee5e957d00f18d476d4148f5c3 | [
"CC0-1.0"
] | permissive | aidaakuyeva/RichContextMetadata | 1b475a94bd3d7ad0118bc5faeb585cd9e7209f59 | 2b038e69a6cc234dd5354e6e056b5b46fec2f3ba | refs/heads/master | 2023-01-11T04:04:15.630422 | 2020-11-16T16:12:29 | 2020-11-16T16:12:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | #!/usr/bin/env python
# encoding: utf-8
import json
import networkx as nx
import os
import rdflib
import sys
import tempfile
TTL_PREAMBLE = """
@prefix cito: <http://purl.org/spar/cito/> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
"""
def wrap_token (t):
if t.startswith("http"):
return "<{}>".format(t)
else:
return "\"{}\"".format(t)
def write_triple (f, s, p, o):
line = "{} {} {} .\n".format(wrap_token(s), wrap_token(p), wrap_token(o))
f.write(line.encode("utf-8"))
if __name__ == "__main__":
filename = sys.argv[1]
term = sys.argv[2]
## load the JSON-LD context
with open("vocab.json", "r") as f:
context = json.load(f)
## write TTL results to a temporary file, for JSON-LD conversion later
f = tempfile.NamedTemporaryFile(delete=False)
f.write(TTL_PREAMBLE.encode("utf-8"))
# load the graph, collected triples related to the search term
graph = rdflib.Graph().parse(filename, format="n3")
for s, p, o in graph:
if s.endswith(term):
write_triple(f, s, p, o)
elif o.endswith(term):
write_triple(f, s, p, o)
f.close()
# serialize the graph as JSON-LD
graph = rdflib.Graph().parse(f.name, format="n3")
os.unlink(f.name)
response = graph.serialize(format="json-ld", context=context, indent=None)
print(response)
| [
"[email protected]"
] | |
39f792696bd72e0fde485771c5c094cc4889aeca | 5095047656d0c2e64f65d1236dbdd3e30ee091eb | /lintcode/easy/39_recover_rotated_sorted_array.py | 5edeabdc9b69503f8830e8a7a40bbc2c86ed449d | [] | no_license | simonfqy/SimonfqyGitHub | 3799fa9e868010864973700fdb8be5d37f6c2560 | fa3704af37d9e04ab6fd13b7b17cc83c239946f7 | refs/heads/master | 2023-04-05T00:33:00.989677 | 2023-03-29T06:58:21 | 2023-03-29T06:58:21 | 33,021,240 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | '''
Link: https://www.lintcode.com/problem/recover-rotated-sorted-array/description
'''
# This is my own solution based on the teachings in Jiuzhang.com. It is sorting in-place.
class Solution:
"""
@param nums: An integer array
@return: nothing
"""
def recoverRotatedSortedArray(self, nums):
# write your code here
if not len(nums):
return
ind_smallest_ele = 0
smallest_ele = nums[0]
for i, num in enumerate(nums):
if num < smallest_ele:
ind_smallest_ele = i
smallest_ele = num
self.reverse(nums, 0, ind_smallest_ele - 1)
self.reverse(nums, ind_smallest_ele, len(nums) - 1)
self.reverse(nums, 0, len(nums) - 1)
def reverse(self, nums, start, end):
while start < end:
left_ele = nums[start]
nums[start] = nums[end]
nums[end] = left_ele
start += 1
end -= 1
| [
"[email protected]"
] | |
fb896906a0344813eb3d06af968407a0bb598b45 | 9f2f386a692a6ddeb7670812d1395a0b0009dad9 | /python/paddle/fluid/tests/unittests/test_traced_layer_err_msg.py | 5703ce131317699ab595efc086049d4ed549d7e4 | [
"Apache-2.0"
] | permissive | sandyhouse/Paddle | 2f866bf1993a036564986e5140e69e77674b8ff5 | 86e0b07fe7ee6442ccda0aa234bd690a3be2cffa | refs/heads/develop | 2023-08-16T22:59:28.165742 | 2022-06-03T05:23:39 | 2022-06-03T05:23:39 | 181,423,712 | 0 | 7 | Apache-2.0 | 2022-08-15T08:46:04 | 2019-04-15T06:15:22 | C++ | UTF-8 | Python | false | false | 9,716 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.fluid as fluid
import unittest
import paddle.nn as nn
import os
class SimpleFCLayer(nn.Layer):
def __init__(self, feature_size, batch_size, fc_size):
super(SimpleFCLayer, self).__init__()
self._linear = nn.Linear(feature_size, fc_size)
self._offset = paddle.to_tensor(
np.random.random((batch_size, fc_size)).astype('float32'))
def forward(self, x):
fc = self._linear(x)
return fc + self._offset
class LinearNetWithNone(nn.Layer):
def __init__(self, feature_size, fc_size):
super(LinearNetWithNone, self).__init__()
self._linear = nn.Linear(feature_size, fc_size)
def forward(self, x):
fc = self._linear(x)
return [fc, [None, 2]]
class TestTracedLayerErrMsg(unittest.TestCase):
def setUp(self):
self.batch_size = 4
self.feature_size = 3
self.fc_size = 2
self.layer = self._train_simple_net()
self.type_str = 'class'
def test_trace_err(self):
if fluid.framework.in_dygraph_mode():
return
with fluid.dygraph.guard():
in_x = fluid.dygraph.to_variable(
np.random.random((self.batch_size, self.feature_size)).astype(
'float32'))
with self.assertRaises(AssertionError) as e:
dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace(
None, [in_x])
self.assertEqual(
"The type of 'layer' in fluid.dygraph.jit.TracedLayer.trace must be fluid.dygraph.Layer, but received <{} 'NoneType'>.".
format(self.type_str), str(e.exception))
with self.assertRaises(TypeError) as e:
dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace(
self.layer, 3)
self.assertEqual(
"The type of 'each element of inputs' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received <{} 'int'>.".
format(self.type_str), str(e.exception))
with self.assertRaises(TypeError) as e:
dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace(
self.layer, [True, 1])
self.assertEqual(
"The type of 'each element of inputs' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received <{} 'bool'>.".
format(self.type_str), str(e.exception))
dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace(
self.layer, [in_x])
def test_set_strategy_err(self):
if fluid.framework.in_dygraph_mode():
return
with fluid.dygraph.guard():
in_x = fluid.dygraph.to_variable(
np.random.random((self.batch_size, self.feature_size)).astype(
'float32'))
dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace(
self.layer, [in_x])
with self.assertRaises(AssertionError) as e:
traced_layer.set_strategy(1, fluid.ExecutionStrategy())
self.assertEqual(
"The type of 'build_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received <{} 'int'>.".
format(self.type_str), str(e.exception))
with self.assertRaises(AssertionError) as e:
traced_layer.set_strategy(fluid.BuildStrategy(), False)
self.assertEqual(
"The type of 'exec_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received <{} 'bool'>.".
format(self.type_str), str(e.exception))
traced_layer.set_strategy(build_strategy=fluid.BuildStrategy())
traced_layer.set_strategy(exec_strategy=fluid.ExecutionStrategy())
traced_layer.set_strategy(fluid.BuildStrategy(),
fluid.ExecutionStrategy())
def test_save_inference_model_err(self):
if fluid.framework.in_dygraph_mode():
return
with fluid.dygraph.guard():
in_x = fluid.dygraph.to_variable(
np.random.random((self.batch_size, self.feature_size)).astype(
'float32'))
dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace(
self.layer, [in_x])
path = './traced_layer_err_msg'
with self.assertRaises(TypeError) as e:
traced_layer.save_inference_model([0])
self.assertEqual(
"The type of 'path' in fluid.dygraph.jit.TracedLayer.save_inference_model must be <{} 'str'>, but received <{} 'list'>. ".
format(self.type_str, self.type_str), str(e.exception))
with self.assertRaises(TypeError) as e:
traced_layer.save_inference_model(path, [0], [None])
self.assertEqual(
"The type of 'each element of fetch' in fluid.dygraph.jit.TracedLayer.save_inference_model must be <{} 'int'>, but received <{} 'NoneType'>. ".
format(self.type_str, self.type_str), str(e.exception))
with self.assertRaises(TypeError) as e:
traced_layer.save_inference_model(path, [0], False)
self.assertEqual(
"The type of 'fetch' in fluid.dygraph.jit.TracedLayer.save_inference_model must be (<{} 'NoneType'>, <{} 'list'>), but received <{} 'bool'>. ".
format(self.type_str, self.type_str, self.type_str),
str(e.exception))
with self.assertRaises(TypeError) as e:
traced_layer.save_inference_model(path, [None], [0])
self.assertEqual(
"The type of 'each element of feed' in fluid.dygraph.jit.TracedLayer.save_inference_model must be <{} 'int'>, but received <{} 'NoneType'>. ".
format(self.type_str, self.type_str), str(e.exception))
with self.assertRaises(TypeError) as e:
traced_layer.save_inference_model(path, True, [0])
self.assertEqual(
"The type of 'feed' in fluid.dygraph.jit.TracedLayer.save_inference_model must be (<{} 'NoneType'>, <{} 'list'>), but received <{} 'bool'>. ".
format(self.type_str, self.type_str, self.type_str),
str(e.exception))
with self.assertRaises(ValueError) as e:
traced_layer.save_inference_model("")
self.assertEqual(
"The input path MUST be format of dirname/file_prefix [dirname\\file_prefix in Windows system], "
"but received file_prefix is empty string.", str(e.exception))
traced_layer.save_inference_model(path)
def _train_simple_net(self):
layer = None
with fluid.dygraph.guard():
layer = SimpleFCLayer(self.feature_size, self.batch_size,
self.fc_size)
optimizer = fluid.optimizer.SGD(learning_rate=1e-3,
parameter_list=layer.parameters())
for i in range(5):
in_x = fluid.dygraph.to_variable(
np.random.random((self.batch_size, self.feature_size))
.astype('float32'))
dygraph_out = layer(in_x)
loss = fluid.layers.reduce_mean(dygraph_out)
loss.backward()
optimizer.minimize(loss)
return layer
class TestOutVarWithNoneErrMsg(unittest.TestCase):
def test_linear_net_with_none(self):
if fluid.framework.in_dygraph_mode():
return
model = LinearNetWithNone(100, 16)
in_x = paddle.to_tensor(np.random.random((4, 100)).astype('float32'))
with self.assertRaises(TypeError):
dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace(model,
[in_x])
class TestTracedLayerSaveInferenceModel(unittest.TestCase):
"""test save_inference_model will automaticlly create non-exist dir"""
def setUp(self):
self.save_path = "./nonexist_dir/fc"
import shutil
if os.path.exists(os.path.dirname(self.save_path)):
shutil.rmtree(os.path.dirname(self.save_path))
def test_mkdir_when_input_path_non_exist(self):
if fluid.framework.in_dygraph_mode():
return
fc_layer = SimpleFCLayer(3, 4, 2)
input_var = paddle.to_tensor(np.random.random([4, 3]).astype('float32'))
with fluid.dygraph.guard():
dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace(
fc_layer, inputs=[input_var])
self.assertFalse(os.path.exists(os.path.dirname(self.save_path)))
traced_layer.save_inference_model(self.save_path)
self.assertTrue(os.path.exists(os.path.dirname(self.save_path)))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
721df6fff854cdc440ec2a31030ed78152f343bc | 352454c055f91b6997e742bbda3e3a17580a499f | /src/scrub/__init__.py | 9ce55cb33f5646907c4bd090bfd3dea99fd1792c | [] | no_license | dushyantkhosla/ds-docker-walkthru-titanic | 533fcca39e54de7e904b41978437788c9206490e | 7534b29776f829633e4efcb999b37694ad9e27b3 | refs/heads/master | 2021-09-12T09:31:59.098462 | 2018-04-05T13:55:00 | 2018-04-05T13:55:00 | 117,235,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from .scrub import get_clean_data, scrub_raw_data
from .compress import compress_numeric, compress_categorical
from .clip import clip_categorical
| [
"[email protected]"
] | |
1a33515f51413fc03f8802cb84b6db01c0016b74 | 2b3ea7bb0df4be7f55d2ac188e23d801e497df8d | /fcsm_eos_api_client/models/aws_encrypted_password.py | 49a5f202a24b352dabcd4e1f2f72fa8919fc1b65 | [] | no_license | mikespub/fcsm-eos-api-client | 12b663b4e79ac5d86c2162dec168bfa240a85f0c | 107a3a7733c55ae6a750e32497268300c6be590e | refs/heads/master | 2020-08-01T18:13:17.229375 | 2019-10-29T14:30:56 | 2019-10-29T14:30:56 | 211,071,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,358 | py | # coding: utf-8
"""
Combined FCSM EOS API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class AwsEncryptedPassword(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'encrypted_password': 'str'
}
attribute_map = {
'encrypted_password': 'encryptedPassword'
}
def __init__(self, encrypted_password=None): # noqa: E501
"""AwsEncryptedPassword - a model defined in OpenAPI""" # noqa: E501
self._encrypted_password = None
self.discriminator = None
self.encrypted_password = encrypted_password
@property
def encrypted_password(self):
"""Gets the encrypted_password of this AwsEncryptedPassword. # noqa: E501
:return: The encrypted_password of this AwsEncryptedPassword. # noqa: E501
:rtype: str
"""
return self._encrypted_password
@encrypted_password.setter
def encrypted_password(self, encrypted_password):
"""Sets the encrypted_password of this AwsEncryptedPassword.
:param encrypted_password: The encrypted_password of this AwsEncryptedPassword. # noqa: E501
:type: str
"""
if encrypted_password is None:
raise ValueError("Invalid value for `encrypted_password`, must not be `None`") # noqa: E501
self._encrypted_password = encrypted_password
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AwsEncryptedPassword):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
88f358459d0853b847609698ac42ef8312da6936 | 4ec6ed4ebcb9346042669e6aa03be0e502ed48b3 | /leetcode/minimum-path-sum.py | 55df64015414dc0f842c2c148e4cf087b7cef48e | [] | no_license | shonihei/road-to-mastery | 79ed41cb1ad0dc2d0b454db2ccc7dd9567b03801 | 312bdf5101c3c1fc9a4d0b6762b5749ca57efe08 | refs/heads/master | 2021-01-22T19:59:17.038641 | 2017-11-16T15:21:55 | 2017-11-16T15:21:55 | 85,266,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | """
Given a m x n grid filled with non-negative numbers, find a path from top
left to bottom right which minimizes the sum of all numbers along its path.
Note: You can only move either down or right at any point in time.
"""
def minPathSum(grid):
for i in range(1, len(grid[0])):
grid[0][i] = grid[0][i] + grid[0][i - 1]
for i in range(1, len(grid)):
grid[i][0] = grid[i][0] + grid[i - 1][0]
for i in range(1, len(grid)):
for j in range(1, len(grid[0])):
grid[i][j] = min(grid[i - 1][j], grid[i][j - 1]) + grid[i][j]
return grid[-1][-1]
if __name__ == "__main__":
import unittest
class Test(unittest.TestCase):
def test1(self):
self.assertEqual(minPathSum([[1, 2], [1, 1]]), 3)
unittest.main()
| [
"[email protected]"
] | |
f497c7f24447f373e69f0647e9382b1f70699ff9 | ead94ae26fa54b0a81ca7bf6bc9a32e2d6ec946c | /0x0C-python-almost_a_circle/models/base.py | 8bd97ec17fe9ebf1e2e3450eb7b49a39da972364 | [] | no_license | Ritapeace/holbertonschool-higher_level_programming | 4d02049843869695b67a148c0b58ec0063ab0bfc | c95a215dbaa07bc73b1e7c3e5a051a4a0afed1c8 | refs/heads/master | 2023-03-16T20:58:08.538187 | 2020-04-16T18:28:24 | 2020-04-16T18:28:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,561 | py | #!/usr/bin/python3
"""
main class of all the proyect
"""
# import turtle
import csv
import os
import json
class Base:
""" class base """
__nb_objects = 0
def __init__(self, id=None):
"""
class constructor
if id is None increment the private class attribute __nb_objects
else asign to self.id the value of id
"""
if id is None:
Base.__nb_objects += 1
self.id = Base.__nb_objects
else:
self.id = id
@staticmethod
def to_json_string(list_dictionaries):
""" to_json_string: return an object in json format"""
if list_dictionaries is None or len(list_dictionaries) == 0:
return ("[]")
else:
return(json.dumps(list_dictionaries))
@classmethod
def save_to_file(cls, list_objs):
""" save_to_fie: save in a fil the dicts of each instance passed"""
filename = cls.__name__ + ".json"
new = []
result = ""
with open(filename, 'w') as fd:
if list_objs is None:
result = cls.to_json_string(new)
else:
for elem in list_objs:
new.append(elem.to_dictionary())
result = cls.to_json_string(new)
fd.write(result)
@staticmethod
def from_json_string(json_string):
""" from_json_string: return an object from a json string"""
if json_string is None or len(json_string) == 0:
return ([])
else:
return (json.loads(json_string))
@classmethod
def create(cls, **dictionary):
"""
create: create a new instance depending of the cls.__name__
it is necesary to initialize the variables width, height
if it is Rectangle or size if it is Square
"""
if cls.__name__ == "Rectangle":
dummy = cls(2, 2)
if cls.__name__ == "Square":
dummy = cls(5)
dummy.update(**dictionary)
return (dummy)
@classmethod
def load_from_file(cls):
"""
load_from_file: reads fro file.json and returns the objects
"""
filename = cls.__name__ + ".json"
variable = ""
result = []
inst = []
if os.path.exists(filename) is True:
with open(filename, 'r') as fd:
variable = fd.read()
result = cls.from_json_string(variable)
for elem in result:
inst.append(cls.create(**elem))
return(inst)
else:
return (result)
@classmethod
def save_to_file_csv(cls, list_objs):
"""
save_to_file_csv: save a dir in a csv file
"""
filename = cls.__name__ + ".csv"
result = ""
new = []
big = []
with open(filename, 'w') as fd:
if list_objs is None:
result = csv.writer(fd, delimiter=',')
result.writerow([])
else:
result = csv.writer(fd, delimiter=',')
if cls.__name__ == "Rectangle":
for elem in list_objs:
new = ['id', 'width', 'height', 'x', 'y']
var = []
for i in new:
var.append(getattr(elem, i))
result.writerow(var)
if cls.__name__ == "Square":
for elem in list_objs:
new = ['id', 'size', 'x', 'y']
var = []
for i in new:
var.append(getattr(elem, i))
result.writerow(var)
@classmethod
def load_from_file_csv(cls):
"""
load_from_file_csv: loads froom csv file and create objects
"""
filename = cls.__name__ + ".csv"
inst = []
d = {}
if os.path.exists(filename) is True:
with open(filename) as fd:
result = csv.reader(fd, delimiter=',')
for row in result:
a = []
for elem in row:
a.append(int(elem))
if cls.__name__ == "Rectangle":
new = ['id', 'width', 'height', 'x', 'y']
for i in range(len(a)):
d[new[i]] = a[i]
inst.append(cls.create(**d))
if cls.__name__ == "Square":
new = ['id', 'size', 'x', 'y']
for i in range(len(a)):
d[new[i]] = a[i]
inst.append(cls.create(**d))
return(inst)
else:
return(result)
"""
@staticmethod
def draw(list_rectangles, list_squares):
turtle = turtle.Turtle()
for elem in list_rectangles:
turtle.goto(elem.x, elem.y)
for i in range(2):
turtle.up()
turtle.forward(elem.width)
turtle.left(90)
turtle.forward(elem.height)
turtle.left(90)
turtle.hidde()
for elem in list_squares:
turtle.goto(elem.x, elem.y)
for i in range(2):
turtle.up()
turtle.forward(elem.width)
turtle.left(90)
turtle.forward(elem.width)
turtle.left(90)
turtle.hidde()
turtle.done()
"""
| [
"[email protected]"
] | |
8e6d03f8372c860983813081ad8a53ea5ba0f293 | 4529dd6b9c257f00bf08301ea744be6f1b4c70ce | /blog/migrations/0001_initial.py | c8c588e7f5622ea448b2dab6521bb82bfde9590e | [] | no_license | ljingen/studycode | bb3bc8b031a8ab5d9a86dbeca7ad8a8c9a6a0bfa | c4a925cc3f60f2729eb5ee415ed513dd41569d88 | refs/heads/master | 2021-06-23T13:51:03.756646 | 2017-07-30T04:57:08 | 2017-07-30T04:57:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-26 10:05
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='标题')),
('body', models.TextField()),
('created_at', models.DateTimeField(default=datetime.datetime(2017, 7, 26, 10, 5, 56, 450747), verbose_name='添加时间')),
('updated_at', models.DateTimeField(default=datetime.datetime(2017, 7, 26, 10, 5, 56, 450774), verbose_name='修改时间')),
('status', models.CharField(choices=[('draft', '草稿'), ('public', '公开')], default='draft', max_length=8)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='姓名')),
('mail', models.EmailField(max_length=100, verbose_name='邮箱')),
],
),
migrations.AddField(
model_name='entry',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='blog.User'),
),
]
| [
"[email protected]"
] | |
9526a8ec66dccafb2ee7d3b42af3c02cf18f4915 | 28a124b6a2f22a53af3b6bb754e77af88b4138e1 | /DJANGO/companytodo/reports/migrations/0002_auto_20191202_2322.py | 4d1cefcb2466476c55f4a795a2a681a753fc7215 | [] | no_license | mebaysan/LearningKitforBeginners-Python | f7c6668a9978b52cad6cc2b969990d7bbfedc376 | 9e1a47fb14b3d81c5b009b74432902090e213085 | refs/heads/master | 2022-12-21T03:12:19.892857 | 2021-06-22T11:58:27 | 2021-06-22T11:58:27 | 173,840,726 | 18 | 4 | null | 2022-12-10T03:00:22 | 2019-03-04T23:56:27 | Python | UTF-8 | Python | false | false | 862 | py | # Generated by Django 2.2.7 on 2019-12-02 20:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0002_product_description'),
('areas', '0001_initial'),
('reports', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='report',
name='product',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='products.Product'),
preserve_default=False,
),
migrations.AddField(
model_name='report',
name='production_line',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='areas.ProductionLine'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
05577e96d6f81cfec509a48ce0200d500b62f248 | 99e44f844d78de330391f2b17bbf2e293bf24b1b | /pytorch/torch/nn/modules/rnn.py | f2c6f70c2419988d4edac0dccb4340489f864405 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | raghavnauhria/whatmt | be10d57bcd6134dd5714d0c4058abd56a1b35a13 | c20483a437c82936cb0fb8080925e37b9c4bba87 | refs/heads/master | 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 | MIT | 2022-11-28T17:50:19 | 2019-06-21T03:48:20 | C++ | UTF-8 | Python | false | false | 43,418 | py | import math
import torch
import warnings
import numbers
from .module import Module
from ..parameter import Parameter
from ..utils.rnn import PackedSequence, get_packed_sequence
from .. import init
from .. import _VF
from ..._jit_internal import weak_module, weak_script_method, weak_script, \
_parameter_list
_rnn_impls = {
'GRU': _VF.gru,
'RNN_TANH': _VF.rnn_tanh,
'RNN_RELU': _VF.rnn_relu,
}
@weak_script
def apply_permutation(tensor, permutation, dim=1):
# type: (Tensor, Tensor, int) -> Tensor
return tensor.index_select(dim, permutation)
class RNNBase(Module):
__constants__ = ['mode', 'input_size', 'hidden_size', 'num_layers', 'bias',
'batch_first', 'dropout', 'bidirectional']
def __init__(self, mode, input_size, hidden_size,
num_layers=1, bias=True, batch_first=False,
dropout=0., bidirectional=False):
super(RNNBase, self).__init__()
self.mode = mode
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = float(dropout)
self.bidirectional = bidirectional
num_directions = 2 if bidirectional else 1
if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
isinstance(dropout, bool):
raise ValueError("dropout should be a number in range [0, 1] "
"representing the probability of an element being "
"zeroed")
if dropout > 0 and num_layers == 1:
warnings.warn("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
"num_layers greater than 1, but got dropout={} and "
"num_layers={}".format(dropout, num_layers))
if mode == 'LSTM':
gate_size = 4 * hidden_size
elif mode == 'GRU':
gate_size = 3 * hidden_size
elif mode == 'RNN_TANH':
gate_size = hidden_size
elif mode == 'RNN_RELU':
gate_size = hidden_size
else:
raise ValueError("Unrecognized RNN mode: " + mode)
self._all_weights = []
for layer in range(num_layers):
for direction in range(num_directions):
layer_input_size = input_size if layer == 0 else hidden_size * num_directions
w_ih = Parameter(torch.Tensor(gate_size, layer_input_size))
w_hh = Parameter(torch.Tensor(gate_size, hidden_size))
b_ih = Parameter(torch.Tensor(gate_size))
# Second bias vector included for CuDNN compatibility. Only one
# bias vector is needed in standard definition.
b_hh = Parameter(torch.Tensor(gate_size))
layer_params = (w_ih, w_hh, b_ih, b_hh)
suffix = '_reverse' if direction == 1 else ''
param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
if bias:
param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
param_names = [x.format(layer, suffix) for x in param_names]
for name, param in zip(param_names, layer_params):
setattr(self, name, param)
self._all_weights.append(param_names)
self.flatten_parameters()
self.reset_parameters()
def flatten_parameters(self):
"""Resets parameter data pointer so that they can use faster code paths.
Right now, this works only if the module is on the GPU and cuDNN is enabled.
Otherwise, it's a no-op.
"""
any_param = next(self.parameters()).data
if not any_param.is_cuda or not torch.backends.cudnn.is_acceptable(any_param):
return
# If any parameters alias, we fall back to the slower, copying code path. This is
# a sufficient check, because overlapping parameter buffers that don't completely
# alias would break the assumptions of the uniqueness check in
# Module.named_parameters().
all_weights = self._flat_weights
unique_data_ptrs = set(p.data_ptr() for p in all_weights)
if len(unique_data_ptrs) != len(all_weights):
return
with torch.cuda.device_of(any_param):
import torch.backends.cudnn.rnn as rnn
# NB: This is a temporary hack while we still don't have Tensor
# bindings for ATen functions
with torch.no_grad():
# NB: this is an INPLACE function on all_weights, that's why the
# no_grad() is necessary.
torch._cudnn_rnn_flatten_weight(
all_weights, (4 if self.bias else 2),
self.input_size, rnn.get_cudnn_mode(self.mode), self.hidden_size, self.num_layers,
self.batch_first, bool(self.bidirectional))
def _apply(self, fn):
ret = super(RNNBase, self)._apply(fn)
self.flatten_parameters()
return ret
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
init.uniform_(weight, -stdv, stdv)
def _get_flat_weights_names(self):
return [weight for weights in self._all_weights for weight in weights]
@_parameter_list(_get_flat_weights_names)
def _get_flat_weights(self):
return self._flat_weights
@weak_script_method
def check_input(self, input, batch_sizes):
# type: (Tensor, Optional[Tensor]) -> None
expected_input_dim = 2 if batch_sizes is not None else 3
if input.dim() != expected_input_dim:
raise RuntimeError(
'input must have {} dimensions, got {}'.format(
expected_input_dim, input.dim()))
if self.input_size != input.size(-1):
raise RuntimeError(
'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
self.input_size, input.size(-1)))
@weak_script_method
def get_expected_hidden_size(self, input, batch_sizes):
# type: (Tensor, Optional[Tensor]) -> Tuple[int, int, int]
if batch_sizes is not None:
mini_batch = batch_sizes[0]
mini_batch = int(mini_batch)
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.hidden_size)
return expected_hidden_size
@weak_script_method
def check_hidden_size(self, hx, expected_hidden_size, msg='Expected hidden size {}, got {}'):
# type: (Tensor, Tuple[int, int, int], str) -> None
if hx.size() != expected_hidden_size:
raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))
def check_forward_args(self, input, hidden, batch_sizes):
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
self.check_hidden_size(hidden, expected_hidden_size)
def permute_hidden(self, hx, permutation):
if permutation is None:
return hx
return apply_permutation(hx, permutation)
def forward(self, input, hx=None):
is_packed = isinstance(input, PackedSequence)
if is_packed:
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
else:
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
num_directions = 2 if self.bidirectional else 1
hx = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
_impl = _rnn_impls[self.mode]
if batch_sizes is None:
result = _impl(input, hx, self._get_flat_weights(), self.bias, self.num_layers,
self.dropout, self.training, self.bidirectional, self.batch_first)
else:
result = _impl(input, batch_sizes, hx, self._get_flat_weights(), self.bias,
self.num_layers, self.dropout, self.training, self.bidirectional)
output = result[0]
hidden = result[1]
if is_packed:
output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
def extra_repr(self):
s = '{input_size}, {hidden_size}'
if self.num_layers != 1:
s += ', num_layers={num_layers}'
if self.bias is not True:
s += ', bias={bias}'
if self.batch_first is not False:
s += ', batch_first={batch_first}'
if self.dropout != 0:
s += ', dropout={dropout}'
if self.bidirectional is not False:
s += ', bidirectional={bidirectional}'
return s.format(**self.__dict__)
def __setstate__(self, d):
super(RNNBase, self).__setstate__(d)
if 'all_weights' in d:
self._all_weights = d['all_weights']
if isinstance(self._all_weights[0][0], str):
return
num_layers = self.num_layers
num_directions = 2 if self.bidirectional else 1
self._all_weights = []
for layer in range(num_layers):
for direction in range(num_directions):
suffix = '_reverse' if direction == 1 else ''
weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}']
weights = [x.format(layer, suffix) for x in weights]
if self.bias:
self._all_weights += [weights]
else:
self._all_weights += [weights[:2]]
@property
def _flat_weights(self):
return [p for layerparams in self.all_weights for p in layerparams]
@property
def all_weights(self):
return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
class RNN(RNNBase):
r"""Applies a multi-layer Elman RNN with :math:`tanh` or :math:`ReLU` non-linearity to an
input sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
h_t = \text{tanh}(W_{ih} x_t + b_{ih} + W_{hh} h_{(t-1)} + b_{hh})
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
previous layer at time `t-1` or the initial hidden state at time `0`.
If :attr:`nonlinearity` is ``'relu'``, then `ReLU` is used instead of `tanh`.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two RNNs together to form a `stacked RNN`,
with the second RNN taking in outputs of the first RNN and
computing the final results. Default: 1
nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as `(batch, seq, feature)`. Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
RNN layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional RNN. Default: ``False``
Inputs: input, h_0
- **input** of shape `(seq_len, batch, input_size)`: tensor containing the features
of the input sequence. The input can also be a packed variable length
sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence`
or :func:`torch.nn.utils.rnn.pack_sequence`
for details.
- **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the initial hidden state for each element in the batch.
Defaults to zero if not provided. If the RNN is bidirectional,
num_directions should be 2, else it should be 1.
Outputs: output, h_n
- **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor
containing the output features (`h_t`) from the last layer of the RNN,
for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has
been given as the input, the output will also be a packed sequence.
For the unpacked case, the directions can be separated
using ``output.view(seq_len, batch, num_directions, hidden_size)``,
with forward and backward being direction `0` and `1` respectively.
Similarly, the directions can be separated in the packed case.
- **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the hidden state for `t = seq_len`.
Like *output*, the layers can be separated using
``h_n.view(num_layers, num_directions, batch, hidden_size)``.
Shape:
- Input1: :math:`(L, N, H_{in})` tensor containing input features where
:math:`H_{in}=\text{input\_size}` and `L` represents a sequence length.
- Input2: :math:`(S, N, H_{out})` tensor
containing the initial hidden state for each element in the batch.
:math:`H_{out}=\text{hidden\_size}`
Defaults to zero if not provided. where :math:`S=\text{num\_layers} * \text{num\_directions}`
If the RNN is bidirectional, num_directions should be 2, else it should be 1.
- Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}`
- Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state
for each element in the batch
Attributes:
weight_ih_l[k]: the learnable input-hidden weights of the k-th layer,
of shape `(hidden_size, input_size)` for `k = 0`. Otherwise, the shape is
`(hidden_size, num_directions * hidden_size)`
weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer,
of shape `(hidden_size, hidden_size)`
bias_ih_l[k]: the learnable input-hidden bias of the k-th layer,
of shape `(hidden_size)`
bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer,
of shape `(hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. include:: cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.RNN(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> output, hn = rnn(input, h0)
"""
def __init__(self, *args, **kwargs):
if 'nonlinearity' in kwargs:
if kwargs['nonlinearity'] == 'tanh':
mode = 'RNN_TANH'
elif kwargs['nonlinearity'] == 'relu':
mode = 'RNN_RELU'
else:
raise ValueError("Unknown nonlinearity '{}'".format(
kwargs['nonlinearity']))
del kwargs['nonlinearity']
else:
mode = 'RNN_TANH'
super(RNN, self).__init__(mode, *args, **kwargs)
@weak_module
class LSTM(RNNBase):
r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input
sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
\begin{array}{ll} \\
i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\
f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\
g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{(t-1)} + b_{hg}) \\
o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\
c_t = f_t * c_{(t-1)} + i_t * g_t \\
h_t = o_t * \tanh(c_t) \\
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell
state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{(t-1)}`
is the hidden state of the layer at time `t-1` or the initial hidden
state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`,
:math:`o_t` are the input, forget, cell, and output gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
In a multilayer LSTM, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
(:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
variable which is :math:`0` with probability :attr:`dropout`.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two LSTMs together to form a `stacked LSTM`,
with the second LSTM taking in outputs of the first LSTM and
computing the final results. Default: 1
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
LSTM layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional LSTM. Default: ``False``
Inputs: input, (h_0, c_0)
- **input** of shape `(seq_len, batch, input_size)`: tensor containing the features
of the input sequence.
The input can also be a packed variable length sequence.
See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
:func:`torch.nn.utils.rnn.pack_sequence` for details.
- **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the initial hidden state for each element in the batch.
If the LSTM is bidirectional, num_directions should be 2, else it should be 1.
- **c_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the initial cell state for each element in the batch.
If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
Outputs: output, (h_n, c_n)
- **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor
containing the output features `(h_t)` from the last layer of the LSTM,
for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been
given as the input, the output will also be a packed sequence.
For the unpacked case, the directions can be separated
using ``output.view(seq_len, batch, num_directions, hidden_size)``,
with forward and backward being direction `0` and `1` respectively.
Similarly, the directions can be separated in the packed case.
- **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the hidden state for `t = seq_len`.
Like *output*, the layers can be separated using
``h_n.view(num_layers, num_directions, batch, hidden_size)`` and similarly for *c_n*.
- **c_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the cell state for `t = seq_len`.
Attributes:
weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
`(W_ii|W_if|W_ig|W_io)`, of shape `(4*hidden_size, input_size)` for `k = 0`.
Otherwise, the shape is `(4*hidden_size, num_directions * hidden_size)`
weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
`(W_hi|W_hf|W_hg|W_ho)`, of shape `(4*hidden_size, hidden_size)`
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
`(b_ii|b_if|b_ig|b_io)`, of shape `(4*hidden_size)`
bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
`(b_hi|b_hf|b_hg|b_ho)`, of shape `(4*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. include:: cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.LSTM(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> c0 = torch.randn(2, 3, 20)
>>> output, (hn, cn) = rnn(input, (h0, c0))
"""
__overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
def __init__(self, *args, **kwargs):
super(LSTM, self).__init__('LSTM', *args, **kwargs)
@weak_script_method
def check_forward_args(self, input, hidden, batch_sizes):
# type: (Tensor, Tuple[Tensor, Tensor], Optional[Tensor]) -> None
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
self.check_hidden_size(hidden[0], expected_hidden_size,
'Expected hidden[0] size {}, got {}')
self.check_hidden_size(hidden[1], expected_hidden_size,
'Expected hidden[1] size {}, got {}')
@weak_script_method
def permute_hidden(self, hx, permutation):
# type: (Tuple[Tensor, Tensor], Optional[Tensor]) -> Tuple[Tensor, Tensor]
if permutation is None:
return hx
return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation)
@weak_script_method
def forward_impl(self, input, hx, batch_sizes, max_batch_size, sorted_indices):
# type: (Tensor, Optional[Tuple[Tensor, Tensor]], Optional[Tensor], int, Optional[Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]] # noqa
if hx is None:
num_directions = 2 if self.bidirectional else 1
zeros = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
hx = (zeros, zeros)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
if batch_sizes is None:
result = _VF.lstm(input, hx, self._get_flat_weights(), self.bias, self.num_layers,
self.dropout, self.training, self.bidirectional, self.batch_first)
else:
result = _VF.lstm(input, batch_sizes, hx, self._get_flat_weights(), self.bias,
self.num_layers, self.dropout, self.training, self.bidirectional)
output = result[0]
hidden = result[1:]
return output, hidden
@weak_script_method
def forward_tensor(self, input, hx=None):
# type: (Tensor, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
@weak_script_method
def forward_packed(self, input, hx=None):
# type: (Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]], Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]], Tuple[Tensor, Tensor]] # noqa
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
output = get_packed_sequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
def forward(self, input, hx=None):
if isinstance(input, PackedSequence):
return self.forward_packed(input, hx)
else:
return self.forward_tensor(input, hx)
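# --- Illustrative usage sketch; not part of the original module. ---
# forward() above dispatches PackedSequence inputs to forward_packed and plain
# tensors to forward_tensor. A minimal demo of the packed path, assuming the
# standard torch.nn.utils.rnn helpers; wrapped in a function so importing this
# module never runs it.
def _demo_lstm_packed_dispatch():
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
    lstm = LSTM(input_size=10, hidden_size=20, num_layers=2)
    padded = torch.randn(5, 3, 10)        # 3 sequences, zero-padded to length 5
    packed = pack_padded_sequence(padded, lengths=[5, 3, 2])
    output, (h_n, c_n) = lstm(packed)     # routed through forward_packed
    unpacked, lengths = pad_packed_sequence(output)
    return unpacked, lengths, h_n, c_n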
class GRU(RNNBase):
r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
\begin{array}{ll}
r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\
h_t = (1 - z_t) * n_t + z_t * h_{(t-1)}
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input
at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer
at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`,
:math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
(:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
variable which is :math:`0` with probability :attr:`dropout`.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two GRUs together to form a `stacked GRU`,
with the second GRU taking in outputs of the first GRU and
computing the final results. Default: 1
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
GRU layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False``
Inputs: input, h_0
- **input** of shape `(seq_len, batch, input_size)`: tensor containing the features
of the input sequence. The input can also be a packed variable length
sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence`
for details.
- **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the initial hidden state for each element in the batch.
Defaults to zero if not provided. If the RNN is bidirectional,
num_directions should be 2, else it should be 1.
Outputs: output, h_n
- **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor
containing the output features h_t from the last layer of the GRU,
for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been
given as the input, the output will also be a packed sequence.
For the unpacked case, the directions can be separated
using ``output.view(seq_len, batch, num_directions, hidden_size)``,
with forward and backward being direction `0` and `1` respectively.
Similarly, the directions can be separated in the packed case.
- **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the hidden state for `t = seq_len`
Like *output*, the layers can be separated using
``h_n.view(num_layers, num_directions, batch, hidden_size)``.
Shape:
- Input1: :math:`(L, N, H_{in})` tensor containing input features where
:math:`H_{in}=\text{input\_size}` and `L` represents a sequence length.
- Input2: :math:`(S, N, H_{out})` tensor
containing the initial hidden state for each element in the batch.
:math:`H_{out}=\text{hidden\_size}`
Defaults to zero if not provided. where :math:`S=\text{num\_layers} * \text{num\_directions}`
If the RNN is bidirectional, num_directions should be 2, else it should be 1.
- Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}`
- Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state
for each element in the batch
Attributes:
weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
(W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`.
Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)`
weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
(W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)`
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
(b_ir|b_iz|b_in), of shape `(3*hidden_size)`
bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
(b_hr|b_hz|b_hn), of shape `(3*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. include:: cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.GRU(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> output, hn = rnn(input, h0)
"""
def __init__(self, *args, **kwargs):
super(GRU, self).__init__('GRU', *args, **kwargs)
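# --- Illustrative usage sketch; not part of the original module. ---
# As the GRU docstring notes, a bidirectional output can be split per direction
# with output.view(seq_len, batch, num_directions, hidden_size). Minimal demo,
# wrapped in a function so importing this module never runs it.
def _demo_gru_direction_split():
    seq_len, batch, hidden_size = 5, 3, 20
    gru = GRU(10, hidden_size, bidirectional=True)
    output, h_n = gru(torch.randn(seq_len, batch, 10))
    per_direction = output.view(seq_len, batch, 2, hidden_size)
    forward_out = per_direction[:, :, 0]   # direction 0 (forward)
    backward_out = per_direction[:, :, 1]  # direction 1 (backward)
    return forward_out, backward_out, h_n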
class RNNCellBase(Module):
__constants__ = ['input_size', 'hidden_size', 'bias']
def __init__(self, input_size, hidden_size, bias, num_chunks):
super(RNNCellBase, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = Parameter(torch.Tensor(num_chunks * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(num_chunks * hidden_size, hidden_size))
if bias:
self.bias_ih = Parameter(torch.Tensor(num_chunks * hidden_size))
self.bias_hh = Parameter(torch.Tensor(num_chunks * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
self.reset_parameters()
def extra_repr(self):
s = '{input_size}, {hidden_size}'
if 'bias' in self.__dict__ and self.bias is not True:
s += ', bias={bias}'
if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
s += ', nonlinearity={nonlinearity}'
return s.format(**self.__dict__)
@weak_script_method
def check_forward_input(self, input):
if input.size(1) != self.input_size:
raise RuntimeError(
"input has inconsistent input_size: got {}, expected {}".format(
input.size(1), self.input_size))
@weak_script_method
def check_forward_hidden(self, input, hx, hidden_label=''):
# type: (Tensor, Tensor, str) -> None
if input.size(0) != hx.size(0):
raise RuntimeError(
"Input batch size {} doesn't match hidden{} batch size {}".format(
input.size(0), hidden_label, hx.size(0)))
if hx.size(1) != self.hidden_size:
raise RuntimeError(
"hidden{} has inconsistent hidden_size: got {}, expected {}".format(
hidden_label, hx.size(1), self.hidden_size))
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
init.uniform_(weight, -stdv, stdv)
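# --- Illustrative sketch; not part of the original module. ---
# reset_parameters() above draws every weight and bias from
# U(-sqrt(k), sqrt(k)) with k = 1/hidden_size. A quick check of that bound,
# wrapped in a function so importing this module never runs it.
def _demo_cell_init_bound():
    cell = RNNCellBase(input_size=10, hidden_size=20, bias=True, num_chunks=1)
    bound = 1.0 / math.sqrt(cell.hidden_size)
    assert all(float(p.abs().max()) <= bound for p in cell.parameters())
    return bound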
@weak_module
class RNNCell(RNNCellBase):
r"""An Elman RNN cell with tanh or ReLU non-linearity.
.. math::
h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh})
If :attr:`nonlinearity` is `'relu'`, then ReLU is used in place of tanh.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
Inputs: input, hidden
- **input** of shape `(batch, input_size)`: tensor containing input features
- **hidden** of shape `(batch, hidden_size)`: tensor containing the initial hidden
state for each element in the batch.
Defaults to zero if not provided.
Outputs: h'
- **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state
for each element in the batch
Shape:
- Input1: :math:`(N, H_{in})` tensor containing input features where
:math:`H_{in}` = `input_size`
- Input2: :math:`(N, H_{out})` tensor containing the initial hidden
state for each element in the batch where :math:`H_{out}` = `hidden_size`
Defaults to zero if not provided.
- Output: :math:`(N, H_{out})` tensor containing the next hidden state
for each element in the batch
Attributes:
weight_ih: the learnable input-hidden weights, of shape
`(hidden_size, input_size)`
weight_hh: the learnable hidden-hidden weights, of shape
`(hidden_size, hidden_size)`
bias_ih: the learnable input-hidden bias, of shape `(hidden_size)`
bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
Examples::
>>> rnn = nn.RNNCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
hx = rnn(input[i], hx)
output.append(hx)
"""
__constants__ = ['input_size', 'hidden_size', 'bias', 'nonlinearity']
def __init__(self, input_size, hidden_size, bias=True, nonlinearity="tanh"):
super(RNNCell, self).__init__(input_size, hidden_size, bias, num_chunks=1)
self.nonlinearity = nonlinearity
@weak_script_method
def forward(self, input, hx=None):
# type: (Tensor, Optional[Tensor]) -> Tensor
self.check_forward_input(input)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
self.check_forward_hidden(input, hx, '')
if self.nonlinearity == "tanh":
ret = _VF.rnn_tanh_cell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
elif self.nonlinearity == "relu":
ret = _VF.rnn_relu_cell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
else:
ret = input # TODO: remove when jit supports exception flow
raise RuntimeError(
"Unknown nonlinearity: {}".format(self.nonlinearity))
return ret
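# --- Illustrative sketch; not part of the original module. ---
# forward() above selects _VF.rnn_tanh_cell or _VF.rnn_relu_cell based on
# self.nonlinearity. Tiny demo of both modes, wrapped in a function so
# importing this module never runs it.
def _demo_rnncell_nonlinearities():
    x, h = torch.randn(3, 10), torch.randn(3, 20)
    h_tanh = RNNCell(10, 20, nonlinearity="tanh")(x, h)
    h_relu = RNNCell(10, 20, nonlinearity="relu")(x, h)
    return h_tanh, h_relu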
@weak_module
class LSTMCell(RNNCellBase):
r"""A long short-term memory (LSTM) cell.
.. math::
\begin{array}{ll}
i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\
f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\
g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\
o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\
c' = f * c + i * g \\
h' = o * \tanh(c') \\
\end{array}
where :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
bias: If ``False``, then the layer does not use bias weights `b_ih` and
`b_hh`. Default: ``True``
Inputs: input, (h_0, c_0)
- **input** of shape `(batch, input_size)`: tensor containing input features
- **h_0** of shape `(batch, hidden_size)`: tensor containing the initial hidden
state for each element in the batch.
- **c_0** of shape `(batch, hidden_size)`: tensor containing the initial cell state
for each element in the batch.
If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
Outputs: (h_1, c_1)
- **h_1** of shape `(batch, hidden_size)`: tensor containing the next hidden state
for each element in the batch
- **c_1** of shape `(batch, hidden_size)`: tensor containing the next cell state
for each element in the batch
Attributes:
weight_ih: the learnable input-hidden weights, of shape
`(4*hidden_size, input_size)`
weight_hh: the learnable hidden-hidden weights, of shape
`(4*hidden_size, hidden_size)`
bias_ih: the learnable input-hidden bias, of shape `(4*hidden_size)`
bias_hh: the learnable hidden-hidden bias, of shape `(4*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
Examples::
>>> rnn = nn.LSTMCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> cx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
hx, cx = rnn(input[i], (hx, cx))
output.append(hx)
"""
def __init__(self, input_size, hidden_size, bias=True):
super(LSTMCell, self).__init__(input_size, hidden_size, bias, num_chunks=4)
@weak_script_method
def forward(self, input, hx=None):
# type: (Tensor, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor]
self.check_forward_input(input)
if hx is None:
zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
hx = (zeros, zeros)
self.check_forward_hidden(input, hx[0], '[0]')
self.check_forward_hidden(input, hx[1], '[1]')
return _VF.lstm_cell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
@weak_module
class GRUCell(RNNCellBase):
r"""A gated recurrent unit (GRU) cell
.. math::
\begin{array}{ll}
r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\
z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\
n = \tanh(W_{in} x + b_{in} + r * (W_{hn} h + b_{hn})) \\
h' = (1 - z) * n + z * h
\end{array}
where :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
bias: If ``False``, then the layer does not use bias weights `b_ih` and
`b_hh`. Default: ``True``
Inputs: input, hidden
- **input** of shape `(batch, input_size)`: tensor containing input features
- **hidden** of shape `(batch, hidden_size)`: tensor containing the initial hidden
state for each element in the batch.
Defaults to zero if not provided.
Outputs: h'
- **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state
for each element in the batch
Shape:
- Input1: :math:`(N, H_{in})` tensor containing input features where
:math:`H_{in}` = `input_size`
- Input2: :math:`(N, H_{out})` tensor containing the initial hidden
state for each element in the batch where :math:`H_{out}` = `hidden_size`
Defaults to zero if not provided.
- Output: :math:`(N, H_{out})` tensor containing the next hidden state
for each element in the batch
Attributes:
weight_ih: the learnable input-hidden weights, of shape
`(3*hidden_size, input_size)`
weight_hh: the learnable hidden-hidden weights, of shape
`(3*hidden_size, hidden_size)`
bias_ih: the learnable input-hidden bias, of shape `(3*hidden_size)`
bias_hh: the learnable hidden-hidden bias, of shape `(3*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
Examples::
>>> rnn = nn.GRUCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
hx = rnn(input[i], hx)
output.append(hx)
"""
def __init__(self, input_size, hidden_size, bias=True):
super(GRUCell, self).__init__(input_size, hidden_size, bias, num_chunks=3)
@weak_script_method
def forward(self, input, hx=None):
# type: (Tensor, Optional[Tensor]) -> Tensor
self.check_forward_input(input)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
self.check_forward_hidden(input, hx, '')
return _VF.gru_cell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
| [
"[email protected]"
] | |
4b89b693039712326e41f12efd84ada959490eb8 | c8335705ff06641622668c9b0a3020df9213bc77 | /core/migrations/0005_homepage_hero_image.py | b81f7fd0a13311af70fe4bc543ec37280b9382e4 | [] | no_license | Richardh36/ANS | 0adedcc760a6acbf539c8cbedde8edc28186218a | 2c46d36cf349f3ab8556bf713d2a0125c415029a | refs/heads/master | 2016-09-11T02:42:21.952145 | 2015-05-03T14:03:10 | 2015-05-03T14:03:10 | 34,852,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0005_make_filter_spec_unique'),
('core', '0004_auto_20150501_1528'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='hero_image',
field=models.ForeignKey(to='wagtailimages.Image', null=True, on_delete=django.db.models.deletion.SET_NULL),
preserve_default=True,
),
]
| [
"[email protected]"
] | |
23e548b8f334899f6c7d8858f53e3ce226a009be | 2846534de9c66457b5f33ad2898a90688a621088 | /numba/minivect/miniast.py | 0fc3b0125b6d3f10572a3679b54b6287b4bda9cf | [
"BSD-2-Clause"
] | permissive | yarikoptic/numba | c6f190aba55647beca4be527e95020918d24063d | 46885bfb326640581afa11abd4bd5f73d6083468 | refs/heads/master | 2021-01-17T06:59:27.793042 | 2012-10-24T01:27:50 | 2012-10-24T01:27:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,687 | py | """
This module provides the AST. Subclass :py:class:`Context` and override the
various methods to allow minivect visitors over the AST, to promote and map types,
etc. Subclass and override :py:class:`ASTBuilder`'s methods to provide alternative
AST nodes or different implementations.
"""
import copy
import string
import types
import minitypes
import miniutils
import minivisitor
import specializers
import type_promoter
import minicode
import codegen
import llvm_codegen
import graphviz
import minierror  # used by ASTBuilder._infer_type
try:
import llvm.core
import llvm.ee
import llvm.passes
except ImportError:
llvm = None
class UndocClassAttribute(object):
"Use this to document class attributes for Sphinx"
def __init__(self, cls):
self.cls = cls
def __call__(self, *args, **kwargs):
return self.cls(*args, **kwargs)
def make_cls(cls1, cls2):
"Fuse two classes together."
name = "%s_%s" % (cls1.__name__, cls2.__name__)
return type(name, (cls1, cls2), {})
class Context(object):
"""
A context that knows how to map ASTs back and forth, how to wrap nodes
and types, and how to instantiate a code generator for specialization.
An opaque_node or foreign node is a node that is not from our AST,
and a normal node is one that has an interface compatible with ours.
To provide custom functionality, set the following attributes, or
subclass this class.
:param astbuilder: the :py:class:`ASTBuilder` or ``None``
:param typemapper: the :py:class:`minivect.minitypes.Typemapper` or
``None`` for the default.
.. attribute:: codegen_cls
The code generator class that is used to generate code.
The default is :py:class:`minivect.codegen.CodeGen`
.. attribute:: cleanup_codegen_cls
The code generator that generates code to dispose of any
garbage (e.g. intermediate object temporaries).
The default is :py:class:`minivect.codegen.CodeGenCleanup`
.. attribute:: codewriter_cls
The code writer that the code generator writes its generated code
to. This may be strings or arbitrary objects.
The default is :py:class:`minivect.minicode.CodeWriter`, which accepts
arbitrary objects.
.. attribute:: codeformatter_cls
A formatter to format the generated code.
The default is :py:class:`minivect.minicode.CodeFormatter`,
which returns a list of objects written. Set this to
:py:class:`minivect.minicode.CodeStringFormatter`
to have the strings joined together.
.. attribute:: specializer_mixin_cls
A specializer mixin class that can override or intercept
functionality. This class should likely participate
cooperatively in MI.
.. attribute:: variable_resolving_mixin_cls
A specializer mixin class that resolves wrapped miniasts in a foreign
AST. This is only needed if you are using :py:class:`NodeWrapper`,
which wraps a miniast somewhere at the leaves.
.. attribute:: graphviz_cls
Visitor to generate a Graphviz graph. See the :py:mod:`graphviz`
module.
.. attribute:: minifunction
The current minifunction that is being translated.
Use subclass :py:class:`CContext` to get the defaults for C code generation.
"""
debug = False
debug_elements = False
use_llvm = False
optimize_broadcasting = True
shape_type = minitypes.Py_ssize_t.pointer()
strides_type = shape_type
astbuilder_cls = None
codegen_cls = UndocClassAttribute(codegen.VectorCodegen)
cleanup_codegen_cls = UndocClassAttribute(codegen.CodeGenCleanup)
codewriter_cls = UndocClassAttribute(minicode.CodeWriter)
codeformatter_cls = UndocClassAttribute(minicode.CodeFormatter)
graphviz_cls = UndocClassAttribute(graphviz.GraphvizGenerator)
specializer_mixin_cls = None
variable_resolving_mixin_cls = None
func_counter = 0
final_specializer = specializers.FinalSpecializer
def __init__(self):
self.init()
if self.use_llvm:
if llvm is None:
import llvm.core as llvm_py_not_available # llvm-py not available
self.llvm_module = llvm.core.Module.new('default_module')
# self.llvm_ee = llvm.ee.ExecutionEngine.new(self.llvm_module)
self.llvm_ee = llvm.ee.EngineBuilder.new(self.llvm_module).force_jit().opt(3).create()
self.llvm_fpm = llvm.passes.FunctionPassManager.new(self.llvm_module)
self.llvm_fpm.initialize()
if not self.debug:
for llvm_pass in self.llvm_passes():
self.llvm_fpm.add(llvm_pass)
else:
self.llvm_ee = None
self.llvm_module = None
def init(self):
self.astbuilder = self.astbuilder_cls(self)
self.typemapper = minitypes.TypeMapper(self)
def run_opaque(self, astmapper, opaque_ast, specializers):
return self.run(astmapper.visit(opaque_ast), specializers)
def run(self, ast, specializer_classes, graphviz_outfile=None,
print_tree=False):
"""
Specialize the given AST with all given specializers and return
an iterable of generated code in the form of
``(specializer, new_ast, codewriter, code_obj)``
The code_obj is the generated code (e.g. a string of C code),
depending on the code formatter used.
"""
for specializer_class in specializer_classes:
self.init()
pipeline = self.pipeline(specializer_class)
specialized_ast = specializers.specialize_ast(ast)
self.astbuilder.minifunction = specialized_ast
for transform in pipeline:
specialized_ast = transform.visit(specialized_ast)
if print_tree:
specialized_ast.print_tree(self)
if graphviz_outfile is not None:
data = self.graphviz(specialized_ast)
graphviz_outfile.write(data)
codewriter = self.codewriter_cls(self)
codegen = self.codegen_cls(self, codewriter)
codegen.visit(specialized_ast)
yield (pipeline[0], specialized_ast, codewriter,
self.codeformatter_cls().format(codewriter))
def debug_c(self, ast, specializer, astbuilder_cls=None):
"Generate C code (for debugging)"
context = CContext()
if astbuilder_cls:
context.astbuilder_cls = astbuilder_cls
else:
context.astbuilder_cls = self.astbuilder_cls
context.shape_type = self.shape_type
context.strides_type = self.strides_type
context.debug = self.debug
result = context.run(ast, [specializer]).next()
_, specialized_ast, _, (proto, impl) = result
return impl
def pipeline(self, specializer_class):
# add specializer mixin and run specializer
if self.specializer_mixin_cls:
specializer_class = make_cls(self.specializer_mixin_cls,
specializer_class)
specializer = specializer_class(self)
pipeline = [specializer]
# Add variable resolving mixin to the final specializer and run
# transform
final_specializer_cls = self.final_specializer
if final_specializer_cls:
if self.variable_resolving_mixin_cls:
final_specializer_cls = make_cls(
self.variable_resolving_mixin_cls,
final_specializer_cls)
pipeline.append(final_specializer_cls(self, specializer))
pipeline.append(type_promoter.TypePromoter(self))
return pipeline
def generate_disposal_code(self, code, node):
"Run the disposal code generator on an (sub)AST"
transform = self.cleanup_codegen_cls(self, code)
transform.visit(node)
#
### Override in subclasses where needed
#
def llvm_passes(self):
"Returns a list of LLVM optimization passes"
return []  # NOTE: the optimization pass list below is currently disabled and unreachable
return [
# llvm.passes.PASS_CFG_SIMPLIFICATION
llvm.passes.PASS_BLOCK_PLACEMENT,
llvm.passes.PASS_BASIC_ALIAS_ANALYSIS,
llvm.passes.PASS_NO_AA,
llvm.passes.PASS_SCALAR_EVOLUTION_ALIAS_ANALYSIS,
# llvm.passes.PASS_ALIAS_ANALYSIS_COUNTER,
llvm.passes.PASS_AAEVAL,
llvm.passes.PASS_LOOP_DEPENDENCE_ANALYSIS,
llvm.passes.PASS_BREAK_CRITICAL_EDGES,
llvm.passes.PASS_LOOP_SIMPLIFY,
llvm.passes.PASS_PROMOTE_MEMORY_TO_REGISTER,
llvm.passes.PASS_CONSTANT_PROPAGATION,
llvm.passes.PASS_LICM,
# llvm.passes.PASS_CONSTANT_MERGE,
llvm.passes.PASS_LOOP_STRENGTH_REDUCE,
llvm.passes.PASS_LOOP_UNROLL,
# llvm.passes.PASS_FUNCTION_ATTRS,
# llvm.passes.PASS_GLOBAL_OPTIMIZER,
# llvm.passes.PASS_GLOBAL_DCE,
llvm.passes.PASS_DEAD_CODE_ELIMINATION,
llvm.passes.PASS_INSTRUCTION_COMBINING,
llvm.passes.PASS_CODE_GEN_PREPARE,
]
def mangle_function_name(self, name):
name = "%s_%d" % (name, self.func_counter)
self.func_counter += 1
return name
def promote_types(self, type1, type2):
"Promote types in an arithmetic operation"
if type1 == type2:
return type1
return self.typemapper.promote_types(type1, type2)
def getchildren(self, node):
"Implement to allow a minivisitor.Visitor over a foreign AST."
return node.child_attrs
def getpos(self, opaque_node):
"Get the position of a foreign node"
filename, line, col = opaque_node.pos
return Position(filename, line, col)
def gettype(self, opaque_node):
"Get a type of a foreign node"
return opaque_node.type
def may_error(self, opaque_node):
"Return whether this node may result in an exception."
raise NotImplementedError
def declare_type(self, type):
"Return a declaration for a type"
raise NotImplementedError
def to_llvm(self, type):
"Return an LLVM type for the given minitype"
return self.typemapper.to_llvm(type)
def graphviz(self, node, graphviz_name="AST"):
visitor = self.graphviz_cls(self, graphviz_name)
graphviz_graph = visitor.visit(node)
return graphviz_graph.to_string()
def is_object(self, type):
return isinstance(type, minitypes.ObjectType)
class CContext(Context):
"Set defaults for C code generation."
codegen_cls = codegen.VectorCodegen
codewriter_cls = minicode.CCodeWriter
codeformatter_cls = minicode.CCodeStringFormatter
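# --- Illustrative usage sketch; not part of the original module. ---
# Context.run() yields one (specializer, specialized_ast, codewriter, code_obj)
# tuple per specializer class; with CContext the code object is a
# (prototype, implementation) string pair. `my_ast` is a hypothetical,
# already-built minivect AST, not something defined in this module.
def _demo_run_ccontext(my_ast):
    context = CContext()
    for specializer, sp_ast, codewriter, (proto, impl) in context.run(
            my_ast, [specializers.StridedSpecializer]):
        print(proto)  # generated C prototype
        print(impl)   # generated C implementation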
class LLVMContext(Context):
"Context with default for LLVM code generation"
use_llvm = True
codegen_cls = llvm_codegen.LLVMCodeGen
class ASTBuilder(object):
"""
This class is used to build up a minivect AST. It can be used by a user
from a transform or otherwise, but the important bit is that we use it
in our code to build up an AST that can be overridden by the user,
and which makes it convenient to build up complex ASTs concisely.
"""
# the 'pos' attribute is set for each visit to each node by
# the ASTMapper
pos = None
temp_reprname_counter = 0
def __init__(self, context):
"""
:param context: the :py:class:`Context`
"""
self.context = context
def _infer_type(self, value):
"Used to infer types for self.constant()"
if isinstance(value, (int, long)):
return minitypes.IntType()
elif isinstance(value, float):
return minitypes.FloatType()
elif isinstance(value, str):
return minitypes.CStringType()
else:
raise minierror.InferTypeError()
def create_function_type(self, function, strides_args=True):
arg_types = []
for arg in function.arguments + function.scalar_arguments:
if arg.used:
if arg.type and arg.type.is_array and not strides_args:
arg_types.append(arg.data_pointer.type)
arg.variables = [arg.data_pointer]
else:
for variable in arg.variables:
arg_types.append(variable.type)
function.type = minitypes.FunctionType(
return_type=function.success_value.type, args=arg_types)
def function(self, name, body, args, shapevar=None, posinfo=None,
omp_size=None):
"""
Create a new function.
:type name: str
:param name: name of the function
:type args: [:py:class:`FunctionArgument`]
:param args: all array and scalar arguments to the function, excluding
shape or position information.
:param shapevar: the :py:class:`Variable` for the total broadcast shape
If ``None``, a default of ``Py_ssize_t *`` is assumed.
:type posinfo: :py:class:`FunctionArgument`
:param posinfo: if given, this will be the second, third and fourth
arguments to the function ``(filename, lineno, column)``.
"""
if shapevar is None:
shapevar = self.variable(self.context.shape_type, 'shape')
arguments, scalar_arguments = [], []
for arg in args:
if arg.type.is_array:
arguments.append(arg)
else:
scalar_arguments.append(arg)
arguments.insert(0, self.funcarg(shapevar))
if posinfo:
arguments.insert(1, posinfo)
body = self.stats(self.nditerate(body))
error_value = self.constant(-1)
success_value = self.constant(0)
function = FunctionNode(self.pos, name, body,
arguments, scalar_arguments,
shapevar, posinfo,
error_value=error_value,
success_value=success_value,
omp_size=omp_size or self.constant(1024))
# prepending statements, used during specialization
function.prepending = self.stats()
function.body = self.stats(function.prepending, function.body)
self.create_function_type(function)
return function
def build_function(self, variables, body, name=None, shapevar=None):
"Convenience method for building a minivect function"
args = []
for var in variables:
if var.type.is_array:
args.append(self.array_funcarg(var))
else:
args.append(self.funcarg(var))
name = name or 'function'
return self.function(name, body, args, shapevar=shapevar)
def funcarg(self, variable, *variables):
"""
Create a (compound) function argument consisting of one or multiple
argument Variables.
"""
if variable.type is not None and variable.type.is_array:
assert not variables
return self.array_funcarg(variable)
if not variables:
variables = [variable]
return FunctionArgument(self.pos, variable, list(variables))
def array_funcarg(self, variable):
"Create an array function argument"
return ArrayFunctionArgument(
self.pos, variable.type, name=variable.name,
variable=variable,
data_pointer=self.data_pointer(variable),
#shape_pointer=self.shapevar(variable),
strides_pointer=self.stridesvar(variable))
def incref(self, var, funcname='Py_INCREF'):
"Generate a Py_INCREF() statement"
functype = minitypes.FunctionType(return_type=minitypes.void,
args=[minitypes.object_])
py_incref = self.funcname(functype, funcname)
return self.expr_stat(self.funccall(py_incref, [var]))
def decref(self, var):
"Generate a Py_DECCREF() statement"
return self.incref(var, funcname='Py_DECREF')
def print_(self, *args):
"Print out all arguments to stdout"
return PrintNode(self.pos, args=list(args))
def funccall(self, func_or_pointer, args, inline=False):
"""
Generate a call to the given function (a :py:class:`FuncNameNode`) of
:py:class:`minivect.minitypes.FunctionType` or a
pointer to a function type and the given arguments.
"""
type = func_or_pointer.type
if type.is_pointer:
type = func_or_pointer.type.base_type
return FuncCallNode(self.pos, type.return_type,
func_or_pointer=func_or_pointer, args=args,
inline=inline)
def funcname(self, type, name, is_external=True):
assert type.is_function
return FuncNameNode(self.pos, type, name=name, is_external=is_external)
def nditerate(self, body):
"""
This node wraps the given AST expression in an :py:class:`NDIterate`
node, which will be expanded by the specializers to one or several
loops.
"""
return NDIterate(self.pos, body)
def for_(self, body, init, condition, step, index=None):
"""
Create a for loop node.
:param body: loop body
:param init: assignment expression
:param condition: boolean loop condition
:param step: step clause (assignment expression)
"""
return ForNode(self.pos, init, condition, step, body, index=index)
def for_range_upwards(self, body, upper, lower=None, step=None):
"""
Create a single upwards for loop, typically used from a specializer to
replace an :py:class:`NDIterate` node.
:param body: the loop body
:param upper: expression specifying an upper bound
"""
index_type = upper.type.unqualify("const")
if lower is None:
lower = self.constant(0, index_type)
if step is None:
step = self.constant(1, index_type)
temp = self.temp(index_type)
init = self.assign_expr(temp, lower)
condition = self.binop(minitypes.bool_, '<', temp, upper)
step = self.assign_expr(temp, self.add(temp, step))
result = self.for_(body, init, condition, step)
result.target = temp
return result
def omp_for(self, for_node, if_clause):
"""
Annotate the for loop with an OpenMP parallel for clause.
:param if_clause: the expression node that determines whether the
parallel section is executed or whether it is
executed sequentially (to avoid synchronization
overhead)
"""
if isinstance(for_node, PragmaForLoopNode):
for_node = for_node.for_node
return OpenMPLoopNode(self.pos, for_node=for_node,
if_clause=if_clause,
lastprivates=[for_node.init.lhs],
privates=[])
def omp_if(self, if_body, else_body=None):
return OpenMPConditionalNode(self.pos, if_body=if_body,
else_body=else_body)
def pragma_for(self, for_node):
"""
Annotate the for loop with pragmas.
"""
return PragmaForLoopNode(self.pos, for_node=for_node)
def stats(self, *statements):
"""
Wrap a bunch of statements in an AST node.
"""
return StatListNode(self.pos, list(statements))
def expr_stat(self, expr):
"Turn an expression into a statement"
return ExprStatNode(expr.pos, type=expr.type, expr=expr)
def expr(self, stats=(), expr=None):
"Evaluate a bunch of statements before evaluating an expression."
return ExprNodeWithStatement(self.pos, type=expr.type,
stat=self.stats(*stats), expr=expr)
def if_(self, cond, body):
"If statement"
return self.if_else(cond, body, None)
def if_else_expr(self, cond, lhs, rhs):
"If/else expression, resulting in lhs if cond else rhs"
type = self.context.promote_types(lhs.type, rhs.type)
return IfElseExprNode(self.pos, type=type, cond=cond, lhs=lhs, rhs=rhs)
def if_else(self, cond, if_body, else_body):
return IfNode(self.pos, cond=cond, body=if_body, else_body=else_body)
def promote(self, dst_type, node):
"Promote or demote the node to the given dst_type"
if node.type != dst_type:
if node.is_constant and node.type.kind == dst_type.kind:
node.type = dst_type
return node
return PromotionNode(self.pos, dst_type, node)
return node
def binop(self, type, op, lhs, rhs):
"""
Binary operation on two nodes.
:param type: the result type of the expression
:param op: binary operator
:type op: str
"""
return BinopNode(self.pos, type, op, lhs, rhs)
def add(self, lhs, rhs, result_type=None, op='+'):
"""
Shorthand for the + binop. Filters out adding 0 constants.
"""
if lhs.is_constant and lhs.value == 0:
return rhs
elif rhs.is_constant and rhs.value == 0:
return lhs
if result_type is None:
result_type = self.context.promote_types(lhs.type, rhs.type)
return self.binop(result_type, op, lhs, rhs)
def sub(self, lhs, rhs, result_type=None):
return self.add(lhs, rhs, result_type, op='-')
def mul(self, lhs, rhs, result_type=None, op='*'):
"""
Shorthand for the * binop. Filters out multiplication with 1 constants.
"""
if op == '*' and lhs.is_constant and lhs.value == 1:
return rhs
elif rhs.is_constant and rhs.value == 1:
return lhs
if result_type is None:
result_type = self.context.promote_types(lhs.type, rhs.type)
return self.binop(result_type, op, lhs, rhs)
def div(self, lhs, rhs, result_type=None):
return self.mul(lhs, rhs, result_type=result_type, op='/')
def min(self, lhs, rhs):
"""
Returns min(lhs, rhs) expression.
.. NOTE:: Make lhs and rhs temporaries if they should only be
evaluated once.
"""
type = self.context.promote_types(lhs.type, rhs.type)
cmp_node = self.binop(type, '<', lhs, rhs)
return self.if_else_expr(cmp_node, lhs, rhs)
def index(self, pointer, index, dest_pointer_type=None):
"""
Index a pointer with the given index node.
:param dest_pointer_type: if given, cast the result (*after* adding
the index) to the destination type and
dereference.
"""
if dest_pointer_type:
return self.index_multiple(pointer, [index], dest_pointer_type)
return SingleIndexNode(self.pos, pointer.type.base_type,
pointer, index)
def index_multiple(self, pointer, indices, dest_pointer_type=None):
"""
Same as :py:meth:`index`, but accepts multiple indices. This is
useful e.g. after multiplication of the indices with the strides.
"""
for index in indices:
pointer = self.add(pointer, index)
if dest_pointer_type is not None:
pointer = self.cast(pointer, dest_pointer_type)
return self.dereference(pointer)
def assign_expr(self, node, value, may_reorder=False):
"Create an assignment expression assigning ``value`` to ``node``"
assert node is not None
if not isinstance(value, Node):
value = self.constant(value)
return AssignmentExpr(self.pos, node.type, node, value,
may_reorder=may_reorder)
def assign(self, node, value, may_reorder=False):
"Assignment statement"
expr = self.assign_expr(node, value, may_reorder=may_reorder)
return self.expr_stat(expr)
def dereference(self, pointer):
"Dereference a pointer"
return DereferenceNode(self.pos, pointer.type.base_type, pointer)
def unop(self, type, operator, operand):
"Unary operation. ``type`` indicates the result type of the expression."
return UnopNode(self.pos, type, operator, operand)
def coerce_to_temp(self, expr):
"Coerce the given expression to a temporary"
type = expr.type
if type.is_array:
type = type.dtype
temp = self.temp(type)
return self.expr(stats=[self.assign(temp, expr)], expr=temp)
def temp(self, type, name=None):
"Allocate a temporary of a given type"
name = name or 'temp'
repr_name = '%s%d' % (name.rstrip(string.digits),
self.temp_reprname_counter)
self.temp_reprname_counter += 1
return TempNode(self.pos, type, name=name, repr_name=repr_name)
def constant(self, value, type=None):
"""
Create a constant from a Python value. If type is not given, it is
inferred (or it will raise a
:py:class:`minivect.minierror.InferTypeError`).
"""
if type is None:
type = self._infer_type(value)
return ConstantNode(self.pos, type, value)
def variable(self, type, name):
"""
Create a variable with a name and type. Variables
may refer to function arguments, functions, etc.
"""
return Variable(self.pos, type, name)
def resolved_variable(self, array_type, name, element):
"""
Creates a node that keeps the array operand information such as the
original array type, but references an actual element in the array.
:param type: original array type
:param name: original array's name
:param element: arbitrary expression that resolves some element in the
array
"""
return ResolvedVariable(self.pos, element.type, name,
element=element, array_type=array_type)
def cast(self, node, dest_type):
"Cast node to the given destination type"
return CastNode(self.pos, dest_type, node)
def return_(self, result):
"Return a result"
return ReturnNode(self.pos, result)
def data_pointer(self, variable):
"Return the data pointer of an array variable"
assert variable.type.is_array
return DataPointer(self.pos, variable.type.dtype.pointer(),
variable)
def shape_index(self, index, function):
"Index the shape of the array operands with integer `index`"
return self.index(function.shape, self.constant(index))
def extent(self, variable, index, function):
"Index the shape of a specific variable with integer `index`"
assert variable.type.is_array
offset = function.ndim - variable.type.ndim
return self.index(function.shape, self.constant(index + offset))
def stridesvar(self, variable):
"Return the strides variable for the given array operand"
return StridePointer(self.pos, self.context.strides_type, variable)
def stride(self, variable, index):
"Return the stride of array operand `variable` at integer `index`"
return self.index(self.stridesvar(variable), self.constant(index))
def sizeof(self, type):
"Return the expression sizeof(type)"
return SizeofNode(self.pos, minitypes.size_t, sizeof_type=type)
def jump(self, label):
"Jump to a label"
return JumpNode(self.pos, label)
def jump_target(self, label):
"""
Return a target that can be jumped to given a label. The label is
shared between the jumpers and the target.
"""
return JumpTargetNode(self.pos, label)
def label(self, name):
"Return a label with a name"
return LabelNode(self.pos, name)
def raise_exc(self, posinfo, exc_var, msg_val, fmt_args):
"""
Raise an exception given the positional information (see the `posinfo`
method), the exception type (PyExc_*), a formatted message string and
a list of values to be used for the format string.
"""
return RaiseNode(self.pos, posinfo, exc_var, msg_val, fmt_args)
def posinfo(self, posvars):
"""
Return position information given a list of position variables
(filename, lineno, column). This can be used for raising exceptions.
"""
return PositionInfoNode(self.pos, posinfo=posvars)
def error_handler(self, node):
"""
Wrap the given node, which may raise exceptions, in an error handler.
An error handler allows the code to clean up before propagating the
error, and finally returning an error indicator from the function.
"""
return ErrorHandler(self.pos, body=node,
error_label=self.label('error'),
cleanup_label=self.label('cleanup'))
def wrap(self, opaque_node, specialize_node_callback, **kwds):
"""
Wrap a node and type and return a NodeWrapper node. This node
will have to be handled by the caller in a code generator. The
specialize_node_callback is called when the NodeWrapper is
specialized by a Specializer.
"""
type = minitypes.TypeWrapper(self.context.gettype(opaque_node),
self.context)
return NodeWrapper(self.context.getpos(opaque_node), type,
opaque_node, specialize_node_callback, **kwds)
#
### Vectorization Functionality
#
def _vector_type(self, base_type, size):
return minitypes.VectorType(element_type=base_type, vector_size=size)
def vector_variable(self, variable, size):
"Return a vector variable for a data pointer variable"
type = self._vector_type(variable.type.dtype, size)
if size == 4:
name = 'xmm_%s' % variable.name
else:
name = 'ymm_%s' % variable.name
return VectorVariable(self.pos, type, name, variable=variable)
def vector_load(self, data_pointer, size):
"Load a SIMD vector of size `size` given an array operand variable"
type = self._vector_type(data_pointer.type.base_type, size)
return VectorLoadNode(self.pos, type, data_pointer, size=size)
def vector_store(self, data_pointer, vector_expr):
"Store a SIMD vector of size `size`"
assert data_pointer.type.base_type == vector_expr.type.element_type
return VectorStoreNode(self.pos, None, "=", data_pointer, vector_expr)
def vector_binop(self, operator, lhs, rhs):
"Perform a binary SIMD operation between two operands of the same type"
assert lhs.type == rhs.type, (lhs.type, rhs.type)
type = lhs.type
return VectorBinopNode(self.pos, type, operator, lhs=lhs, rhs=rhs)
def vector_unop(self, type, operator, operand):
return VectorUnopNode(self.pos, type, operator, operand)
def vector_const(self, type, constant):
return ConstantVectorNode(self.pos, type, constant=constant)
def noop_expr(self):
return NoopExpr(self.pos, type=None)
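# --- Illustrative usage sketch; not part of the original module. ---
# Minimal example of driving the builder: express dst = op1 + op2 over 2D
# double arrays and wrap it in a minivect function via build_function() above.
# The variable names and the double[:, :] array-type spelling are assumptions
# for illustration.
def _demo_build_elementwise_add(context):
    b = context.astbuilder
    array_type = minitypes.double[:, :]  # assumed 2D strided double array type
    dst = b.variable(array_type, 'dst')
    op1 = b.variable(array_type, 'op1')
    op2 = b.variable(array_type, 'op2')
    body = b.assign(dst, b.add(op1, op2))
    return b.build_function([dst, op1, op2], body, name='elementwise_add')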
class DynamicArgumentASTBuilder(ASTBuilder):
"""
Create a function with a dynamic number of arguments. This means the
signature looks like
func(int *shape, float *data[n_ops], int *strides[n_ops])
To create minivect kernels supporting this signature, set the
astbuilder_cls attribute of Context to this class.
"""
def data_pointer(self, variable):
if not hasattr(variable, 'data_pointer'):
temp = self.temp(variable.type.dtype.pointer(),
variable.name + "_data_temp")
variable.data_pointer = temp
return variable.data_pointer
def _create_data_pointer(self, function, argument, i):
variable = argument.variable
temp = self.data_pointer(variable)
p = self.index(function.data_pointers, self.constant(i))
p = self.cast(p, variable.type.dtype.pointer())
assmt = self.assign(temp, p)
function.body.stats.insert(0, assmt)
return temp
def stridesvar(self, variable):
"Return the strides variable for the given array operand"
if not hasattr(variable, 'strides_pointer'):
temp = self.temp(self.context.strides_type,
variable.name + "_stride_temp")
variable.strides_pointer = temp
return variable.strides_pointer
def _create_strides_pointer(self, function, argument, i):
variable = argument.variable
temp = self.stridesvar(variable)
strides = self.index(function.strides_pointers, self.constant(i))
function.body.stats.insert(0, self.assign(temp, strides))
return temp
def function(self, name, body, args, shapevar=None, posinfo=None,
omp_size=None):
function = super(DynamicArgumentASTBuilder, self).function(
name, body, args, shapevar, posinfo, omp_size)
function.data_pointers = self.variable(
minitypes.void.pointer().pointer(), 'data_pointers')
function.strides_pointers = self.variable(
function.shape.type.pointer(), 'strides_pointer')
i = len(function.arrays) - 1
for argument in function.arrays[::-1]:
data_p = self._create_data_pointer(function, argument, i)
strides_p = self._create_strides_pointer(function, argument, i)
argument.data_pointer = data_p
argument.strides_pointer = strides_p
argument.used = False
i -= 1
argpos = 1
if posinfo:
argpos = 4
function.arguments.insert(argpos,
self.funcarg(function.strides_pointers))
function.arguments.insert(argpos,
self.funcarg(function.data_pointers))
self.create_function_type(function)
# print function.type
# print self.context.debug_c(
# function, specializers.StridedSpecializer, type(self))
return function
Context.astbuilder_cls = UndocClassAttribute(ASTBuilder)
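# --- Illustrative sketch; not part of the original module. ---
# As the DynamicArgumentASTBuilder docstring says, the dynamic
# func(shape, data[n_ops], strides[n_ops]) signature is selected by overriding
# astbuilder_cls on a Context subclass; a hypothetical opt-in:
class _DynamicArgumentCContext(CContext):
    astbuilder_cls = UndocClassAttribute(DynamicArgumentASTBuilder)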
class Position(object):
"Each node has a position which is an instance of this type."
def __init__(self, filename, line, col):
self.filename = filename
self.line = line
self.col = col
def __str__(self):
return "%s:%d:%d" % (self.filename, self.line, self.col)
class Node(miniutils.ComparableObjectMixin):
"""
Base class for AST nodes.
"""
is_expression = False
is_statlist = False
is_constant = False
is_assignment = False
is_unop = False
is_binop = False
is_node_wrapper = False
is_data_pointer = False
is_jump = False
is_label = False
is_temp = False
is_statement = False
is_sizeof = False
is_variable = False
is_function = False
is_funcarg = False
is_array_funcarg = False
is_specialized = False
child_attrs = []
def __init__(self, pos, **kwds):
self.pos = pos
vars(self).update(kwds)
def may_error(self, context):
"""
Return whether something may go wrong and we need to jump to an
error handler.
"""
visitor = minivisitor.MayErrorVisitor(context)
visitor.visit(self)
return visitor.may_error
def print_tree(self, context):
visitor = minivisitor.PrintTree(context)
visitor.visit(self)
@property
def children(self):
return [getattr(self, attr) for attr in self.child_attrs
if getattr(self, attr) is not None]
@property
def comparison_objects(self):
type = getattr(self, 'type', None)
if type is None:
return self.children
return tuple(self.children) + (type,)
def __eq__(self, other):
# Don't use isinstance here, compare on exact type to be consistent
# with __hash__. Override where sensible
return (type(self) is type(other) and
self.comparison_objects == other.comparison_objects)
def __hash__(self):
h = hash(type(self))
for obj in self.comparison_objects:
h = h ^ hash(obj)
return h
class ExprNode(Node):
"Base class for expressions. Each node has a type."
is_expression = True
hoistable = False
need_temp = False
def __init__(self, pos, type, **kwds):
super(ExprNode, self).__init__(pos, **kwds)
self.type = type
class FunctionNode(Node):
"""
Function node. error_value and success_value are returned in case of
exceptions and success respectively.
.. attribute:: shape
the broadcast shape for all operands
.. attribute:: ndim
the ndim of the total broadcast' shape
.. attribute:: arguments
all array arguments
.. attribute:: scalar arguments
all non-array arguments
.. attribute:: posinfo
the position variables we can write to in case of an exception
.. attribute:: omp_size
the threshold of minimum data size needed before starting a parallel
section. May be overridden at any time before specialization time.
"""
is_function = True
child_attrs = ['body', 'arguments', 'scalar_arguments']
def __init__(self, pos, name, body, arguments, scalar_arguments,
shape, posinfo, error_value, success_value, omp_size):
super(FunctionNode, self).__init__(pos)
self.type = None # see ASTBuilder.create_function_type
self.name = name
self.body = body
self.arrays = [arg for arg in arguments if arg.type and arg.type.is_array]
self.arguments = arguments
self.scalar_arguments = scalar_arguments
self.shape = shape
self.posinfo = posinfo
self.error_value = error_value
self.success_value = success_value
self.omp_size = omp_size
self.args = dict((v.name, v) for v in arguments)
self.ndim = max(arg.type.ndim for arg in arguments
if arg.type and arg.type.is_array)
class FuncCallNode(ExprNode):
"""
Call a function given a pointer or its name (FuncNameNode)
"""
inline = False
child_attrs = ['func_or_pointer', 'args']
class FuncNameNode(ExprNode):
"""
Load an external function by its name.
"""
name = None
class ReturnNode(Node):
"Return an operand"
child_attrs = ['operand']
def __init__(self, pos, operand):
super(ReturnNode, self).__init__(pos)
self.operand = operand
class RaiseNode(Node):
"Raise a Python exception. The callee must hold the GIL."
child_attrs = ['posinfo', 'exc_var', 'msg_val', 'fmt_args']
def __init__(self, pos, posinfo, exc_var, msg_val, fmt_args):
super(RaiseNode, self).__init__(pos)
self.posinfo = posinfo
self.exc_var, self.msg_val, self.fmt_args = (exc_var, msg_val, fmt_args)
class PositionInfoNode(Node):
"""
Node that holds a position of where an error occurred. This position
needs to be returned to the callee if the callee supports it.
"""
class FunctionArgument(ExprNode):
"""
Argument to the FunctionNode. Array arguments contain multiple
actual arguments, e.g. the data and stride pointer.
.. attribute:: variable
some argument to the function (array or otherwise)
.. attribute:: variables
the actual variables this operand should be unpacked into
"""
child_attrs = ['variables']
is_funcarg = True
used = True
def __init__(self, pos, variable, variables):
super(FunctionArgument, self).__init__(pos, variable.type)
self.variables = variables
self.variable = variable
self.name = variable.name
self.args = dict((v.name, v) for v in variables)
class ArrayFunctionArgument(ExprNode):
"Array operand to the function"
child_attrs = ['data_pointer', 'strides_pointer']
is_array_funcarg = True
used = True
def __init__(self, pos, type, data_pointer, strides_pointer, **kwargs):
super(ArrayFunctionArgument, self).__init__(pos, type, **kwargs)
self.data_pointer = data_pointer
self.strides_pointer = strides_pointer
self.variables = [data_pointer, strides_pointer]
class PrintNode(Node):
"Print node for some arguments"
child_attrs = ['args']
class NDIterate(Node):
"""
Iterate in N dimensions. See :py:class:`ASTBuilder.nditerate`
"""
child_attrs = ['body']
def __init__(self, pos, body):
super(NDIterate, self).__init__(pos)
self.body = body
class ForNode(Node):
"""
A for loop, see :py:class:`ASTBuilder.for_`
"""
child_attrs = ['init', 'condition', 'step', 'body']
is_controlling_loop = False
is_tiling_loop = False
should_vectorize = False
is_fixup = False
def __init__(self, pos, init, condition, step, body, index=None):
super(ForNode, self).__init__(pos)
self.init = init
self.condition = condition
self.step = step
self.body = body
self.index = index or init.lhs
class IfNode(Node):
"An 'if' statement, see A for loop, see :py:class:`ASTBuilder.if_`"
child_attrs = ['cond', 'body', 'else_body']
should_vectorize = False
is_fixup = False
class StatListNode(Node):
"""
A node to wrap multiple statements, see :py:class:`ASTBuilder.stats`
"""
child_attrs = ['stats']
is_statlist = True
def __init__(self, pos, statements):
super(StatListNode, self).__init__(pos)
self.stats = statements
class ExprStatNode(Node):
"Turn an expression into a statement, see :py:class:`ASTBuilder.expr_stat`"
child_attrs = ['expr']
is_statement = True
class ExprNodeWithStatement(Node):
child_attrs = ['stat', 'expr']
class NodeWrapper(ExprNode):
"""
Adapt an opaque node to provide a consistent interface. This has to be
handled by the user's specializer. See :py:class:`ASTBuilder.wrap`
"""
is_node_wrapper = True
is_constant_scalar = False
child_attrs = []
def __init__(self, pos, type, opaque_node, specialize_node_callback,
**kwds):
super(NodeWrapper, self).__init__(pos, type)
self.opaque_node = opaque_node
self.specialize_node_callback = specialize_node_callback
vars(self).update(kwds)
def __hash__(self):
return hash(self.opaque_node)
def __eq__(self, other):
if getattr(other, 'is_node_wrapper', False):
return self.opaque_node == other.opaque_node
return NotImplemented
def __deepcopy__(self, memo):
kwds = dict(vars(self))
kwds.pop('opaque_node')
kwds.pop('specialize_node_callback')
kwds = copy.deepcopy(kwds, memo)
opaque_node = self.specialize_node_callback(self, memo)
return type(self)(opaque_node=opaque_node,
specialize_node_callback=self.specialize_node_callback,
**kwds)
class BinaryOperationNode(ExprNode):
"Base class for binary operations"
child_attrs = ['lhs', 'rhs']
def __init__(self, pos, type, lhs, rhs, **kwds):
super(BinaryOperationNode, self).__init__(pos, type, **kwds)
self.lhs, self.rhs = lhs, rhs
class BinopNode(BinaryOperationNode):
"Node for binary operations"
is_binop = True
def __init__(self, pos, type, operator, lhs, rhs, **kwargs):
super(BinopNode, self).__init__(pos, type, lhs, rhs, **kwargs)
self.operator = operator
@property
def comparison_objects(self):
return (self.operator, self.lhs, self.rhs)
class SingleOperandNode(ExprNode):
"Base class for operations with one operand"
child_attrs = ['operand']
def __init__(self, pos, type, operand, **kwargs):
super(SingleOperandNode, self).__init__(pos, type, **kwargs)
self.operand = operand
class AssignmentExpr(BinaryOperationNode):
is_assignment = True
class IfElseExprNode(ExprNode):
child_attrs = ['cond', 'lhs', 'rhs']
class PromotionNode(SingleOperandNode):
pass
class UnopNode(SingleOperandNode):
is_unop = True
def __init__(self, pos, type, operator, operand, **kwargs):
super(UnopNode, self).__init__(pos, type, operand, **kwargs)
self.operator = operator
@property
def comparison_objects(self):
return (self.operator, self.operand)
class CastNode(SingleOperandNode):
is_cast = True
class DereferenceNode(SingleOperandNode):
is_dereference = True
class SingleIndexNode(BinaryOperationNode):
is_index = True
class ConstantNode(ExprNode):
is_constant = True
def __init__(self, pos, type, value):
super(ConstantNode, self).__init__(pos, type)
self.value = value
class SizeofNode(ExprNode):
is_sizeof = True
class Variable(ExprNode):
"""
Represents use of a function argument in the function.
"""
is_variable = True
mangled_name = None
hoisted = False
def __init__(self, pos, type, name, **kwargs):
super(Variable, self).__init__(pos, type, **kwargs)
self.name = name
self.array_type = None
def __eq__(self, other):
return isinstance(other, Variable) and self.name == other.name
def __hash__(self):
return hash(self.name)
class ResolvedVariable(Variable):
child_attrs = ['element']
def __eq__(self, other):
return (isinstance(other, ResolvedVariable) and
self.element == other.element)
class ArrayAttribute(Variable):
"Denotes an attribute of array operands, e.g. the data or stride pointers"
def __init__(self, pos, type, arrayvar):
super(ArrayAttribute, self).__init__(pos, type,
arrayvar.name + self._name)
self.arrayvar = arrayvar
class DataPointer(ArrayAttribute):
"Reference to the start of an array operand"
_name = '_data'
class StridePointer(ArrayAttribute):
"Reference to the stride pointer of an array variable operand"
_name = '_strides'
#class ShapePointer(ArrayAttribute):
# "Reference to the shape pointer of an array operand."
# _name = '_shape'
class TempNode(Variable):
"A temporary of a certain type"
is_temp = True
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(id(self))
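# Minimal usage sketch (illustrative `pos`/`type` values, not defined in this module):
#   lhs = Variable(pos, int32, 'a')
#   rhs = ConstantNode(pos, int32, 2)
#   expr = BinopNode(pos, int32, '*', lhs, rhs)   # represents the expression a * 2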
class OpenMPLoopNode(Node):
"""
Execute a loop in parallel.
"""
child_attrs = ['for_node', 'if_clause', 'lastprivates', 'privates']
class OpenMPConditionalNode(Node):
"""
Execute if_body if _OPENMP, otherwise execute else_body.
"""
child_attrs = ['if_body', 'else_body']
class PragmaForLoopNode(Node):
"""
Generate compiler-specific pragmas to aid things like SIMDization.
"""
child_attrs = ['for_node']
class ErrorHandler(Node):
"""
A node to handle errors. If there is an error handler in the outer scope,
the specializer will first make this error handler generate disposal code
for the wrapped AST body, and then jump to the error label of the parent
error handler. At the outermost (function) level, the error handler simply
returns an error indication.
.. attribute:: error_label
point to jump to in case of an error
.. attribute:: cleanup_label
point to jump to in the normal case
It generates the following:
.. code-block:: c
error_var = 0;
...
goto cleanup;
error:
error_var = 1;
cleanup:
...
if (error_var)
goto outer_error_label;
"""
child_attrs = ['error_var_init', 'body', 'cleanup_jump',
'error_target_label', 'error_set', 'cleanup_target_label',
'cascade']
error_var_init = None
cleanup_jump = None
error_target_label = None
error_set = None
cleanup_target_label = None
cascade = None
class JumpNode(Node):
"A jump to a jump target"
child_attrs = ['label']
def __init__(self, pos, label):
Node.__init__(self, pos)
self.label = label
class JumpTargetNode(JumpNode):
"A point to jump to"
class LabelNode(ExprNode):
"A goto label or memory address that we can jump to"
def __init__(self, pos, name):
super(LabelNode, self).__init__(pos, None)
self.name = name
self.mangled_name = None
class NoopExpr(ExprNode):
"Do nothing expression"
#
### Vectorization Functionality
#
class VectorVariable(Variable):
child_attrs = ['variable']
class VectorLoadNode(SingleOperandNode):
"Load a SIMD vector"
class VectorStoreNode(BinopNode):
"Store a SIMD vector"
class VectorBinopNode(BinopNode):
"Binary operation on SIMD vectors"
class VectorUnopNode(SingleOperandNode):
"Unary operation on SIMD vectors"
class ConstantVectorNode(ExprNode):
"Load the constant into the vector register" | [
"[email protected]"
] | |
4354b781f08321717f27ae24f9ca7b2823049b1e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/117/usersdata/163/26262/submittedfiles/al2.py | 755eb8db7363ddb972d635033445678a25e04869 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from __future__ import division
# START YOUR CODE HERE
n = float(input('Digite um numero real:'))
inteiro = int(n)
real = n - inteiro
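# Illustrative values: for input 3.75 this gives inteiro == 3 and real == 0.75.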
print(inteiro)
print('%.2f' % real) | [
"[email protected]"
] | |
a1eee10ae915ce507e8ee0992bc14ba1d655000a | 58a47ad4c25023c868658a7c55a303b3130f3b16 | /backend/models/__init__.py | e6f581ed206f58bd9affb4ed05d45e4d57489acd | [] | no_license | jmnelmar/reminder120 | 79ecc9c956b19e967779bba4e4e7d40bdb5f094e | a18e99ff2c51193dff436530190dd1d1e61666a3 | refs/heads/master | 2021-03-06T05:58:48.435269 | 2020-03-10T02:02:26 | 2020-03-10T02:02:26 | 246,184,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | from backend.models.patient import Patient | [
"[email protected]"
] | |
d53fba9d4a171874a43be778f4b75c108a7f4482 | c54f5a7cf6de3ed02d2e02cf867470ea48bd9258 | /pyobjc/pyobjc-framework-Quartz/PyObjCTest/test_PDFAnnotationPopup.py | d3f43c1657a8c051103ca7d463852aef2104814e | [
"MIT"
] | permissive | orestis/pyobjc | 01ad0e731fbbe0413c2f5ac2f3e91016749146c6 | c30bf50ba29cb562d530e71a9d6c3d8ad75aa230 | refs/heads/master | 2021-01-22T06:54:35.401551 | 2009-09-01T09:24:47 | 2009-09-01T09:24:47 | 16,895 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 305 | py |
from PyObjCTools.TestSupport import *
from Quartz.PDFKit import *
class TestPDFAnnotationPopup (TestCase):
def testMethods(self):
self.failUnlessResultIsBOOL(PDFAnnotationPopup.isOpen)
self.failUnlessArgIsBOOL(PDFAnnotationPopup.setIsOpen_, 0)
if __name__ == "__main__":
main()
| [
"ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"
] | ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25 |
12b143198ee564e1b2311b19892e0b052b92a34b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02677/s129355694.py | 8a3a6c982603a763e0eda9f0b0cd3a4f65454318 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import math
a, b, h, m = map(int, input().split())
theta = abs((h / 12) + (m / 60) * (1 / 12) - (m / 60)) * 2 * math.pi
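# The printed distance is the law of cosines, d = sqrt(a^2 + b^2 - 2*a*b*cos(theta)),
# written out below via the sin/cos components of the two hand tips.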
print(math.sqrt(b**2 * math.sin(theta)**2 + (b*math.cos(theta) -a) **2)) | [
"[email protected]"
] | |
63d117d1b2d5478d51e1f8fc837b4aeb7c54cc24 | cc619d6e81c39fe54d4875e3c6936e25bb8a7ebd | /demos/multiple_problems.py | 195c16a0fc049f57e60b621cecf8204fe7713ed0 | [] | no_license | joshua4289/python3-examples | cb01060f649c7dc97185566b00fa0d59a1ffdca3 | 70c1fd0b1e5bf25e82697257fb9f92cd06e922b7 | refs/heads/master | 2020-12-19T08:19:13.310071 | 2017-04-28T13:48:01 | 2017-04-28T13:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | class UnrecoverableError(Exception): pass
class MyException(Exception): pass
def main():
try:
part1()
part2()
part3()
part4()
except UnrecoverableError as e:
print("UnrecoverableError")
except MyException as e:
print("that's all folks")
def part1():
try:
raise MyException("oops")
print("part 1")
except MyException as e:
# log it
print(e)
raise
def part2():
try:
print("part 2")
except MyException as e:
print(e)
def part3():
try:
raise UnrecoverableError("oops")
print("part 3")
except MyException as e:
print(e)
def part4():
try:
print("part 4")
except MyException as e:
print(e)
main()
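# Expected behaviour when run: part1() logs "oops" and re-raises, main() catches
# the re-raised MyException and prints "that's all folks"; parts 2-4 never execute.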
| [
"[email protected]"
] | |
404ecb75c55291f907d7c3903ecc926fadd45862 | 343eb5d9ea4bfb29191c8f967585278dc2892b3f | /RedBimEngine/constants.py | c434f8a5f202af2622c1eb7bd0cd389c9bb85b18 | [] | no_license | NauGaika/PikBim | 11bd1929f7ca63d740692c9a7bcc31715f11b38b | 28f3ce2d8d27eb062ee5173fb4e21c262c30ba4b | refs/heads/master | 2020-07-28T20:43:11.928011 | 2019-09-19T11:10:37 | 2019-09-19T11:10:37 | 209,531,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | # -*- coding: utf-8 -*-
"""Хранятся константы.
USERNAME
HOME_DIR
DIR_SCRIPTS
STATIC_IMAGE
USER_SCRIPTS
LOGO
"""
import os
def get_username():
"""Получить текущее имя пользователя."""
uname = __revit__.Application.Username
uname = uname.split('@')[0]
uname = uname.replace('.', '')
return uname
def parent_dir(dir, count=1):
"""Получает родительску дирректорию."""
dir_name = dir
while count:
count -= 1
dir_name = os.path.dirname(dir_name)
dir_name = os.path.abspath(dir_name)
return dir_name
USERNAME = get_username()
HOME_DIR = '\\\\picompany.ru\\pikp\\Dep\\LKP4\\WORK\\scripts'
LOADER = os.path.join(HOME_DIR, 'loader')
GOOGLE = os.path.join(LOADER, 'Google')
DIR_SCRIPTS = os.path.join(HOME_DIR, 'scripts')
DIR_SYSTEM_SCRIPTS = os.path.join(HOME_DIR, 'systemscripts')
STATIC_IMAGE = os.path.join(HOME_DIR, 'static\\img')
USER_SCRIPTS = os.path.join(HOME_DIR, 'scripts\\Пользовательские.tab\\Скрипты.panel')
USER_SCRIPT_TEMP = os.path.join(HOME_DIR, 'scripts\\Пользовательские.tab\\Временный.panel\\Временный.pushbutton\\__init__.py')
LOGO = 'RB'
START_SCRIPT = os.path.join(HOME_DIR, 'common_scripts\\start_of_script.py')
__all__ = ['USERNAME', 'HOME_DIR', 'DIR_SCRIPTS', 'STATIC_IMAGE',
'USER_SCRIPTS', 'LOGO', 'START_SCRIPT', 'USER_SCRIPT_TEMP',
'LOADER', 'GOOGLE']
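# Minimal usage sketch (hypothetical consumer script):
#   from RedBimEngine.constants import USER_SCRIPTS, USERNAME
#   user_panel = os.path.join(USER_SCRIPTS, USERNAME)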
| [
"[email protected]"
] | |
6c47db3bb3320ccd1a523f04599fd9af3d2faa4d | b46dd521ffab869a32bf4580d2cbf35c94e550c6 | /domain/BallotDomain.py | 5aded2408c3e155ea708285a9fa57c2199518353 | [] | no_license | pawan-manishka/results-tabulation-tallysheets | b698b080c6393cf728ae3f79891e20dd2d309add | 7d2f065dfd03304e3aa8f079145bbc96d03c481c | refs/heads/master | 2020-06-14T19:02:21.523754 | 2019-07-03T08:34:05 | 2019-07-03T08:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from config import db
from models import BallotModel as Model
from domain import InvoiceItemDomain
def get_all():
result = Model.query.all()
return result
def create(body):
invoice_item = InvoiceItemDomain.create()
result = Model(
ballotId=body["ballotId"],
invoiceItemId=invoice_item.invoiceItemId
)
db.session.add(result)
db.session.commit()
return result
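# Illustrative call (hypothetical payload shape):
#   ballot = create({"ballotId": "BAL-001"})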
| [
"[email protected]"
] | |
d21d32ebb9613caff29cdf4a1c7c456ce7f5f81c | 3f6c16ea158a8fb4318b8f069156f1c8d5cff576 | /.PyCharm2019.1/system/python_stubs/-1850396913/_imp.py | 0b80f25b1fa57f9f166bff38ae22e695685dc730 | [] | no_license | sarthak-patidar/dotfiles | 08494170d2c0fedc0bbe719cc7c60263ce6fd095 | b62cd46f3491fd3f50c704f0255730af682d1f80 | refs/heads/master | 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 | Shell | UTF-8 | Python | false | false | 5,612 | py | # encoding: utf-8
# module _imp
# from (built-in)
# by generator 1.147
""" (Extremely) low-level import machinery bits as used by importlib and imp. """
# no imports
# functions
def acquire_lock(*args, **kwargs): # real signature unknown
"""
Acquires the interpreter's import lock for the current thread.
This lock should be used by import hooks to ensure thread-safety when importing
modules. On platforms without threads, this function does nothing.
"""
pass
def create_builtin(*args, **kwargs): # real signature unknown
""" Create an extension module. """
pass
def create_dynamic(*args, **kwargs): # real signature unknown
""" Create an extension module. """
pass
def exec_builtin(*args, **kwargs): # real signature unknown
""" Initialize a built-in module. """
pass
def exec_dynamic(*args, **kwargs): # real signature unknown
""" Initialize an extension module. """
pass
def extension_suffixes(*args, **kwargs): # real signature unknown
""" Returns the list of file suffixes used to identify extension modules. """
pass
def get_frozen_object(*args, **kwargs): # real signature unknown
""" Create a code object for a frozen module. """
pass
def init_frozen(*args, **kwargs): # real signature unknown
""" Initializes a frozen module. """
pass
def is_builtin(*args, **kwargs): # real signature unknown
""" Returns True if the module name corresponds to a built-in module. """
pass
def is_frozen(*args, **kwargs): # real signature unknown
""" Returns True if the module name corresponds to a frozen module. """
pass
def is_frozen_package(*args, **kwargs): # real signature unknown
""" Returns True if the module name is of a frozen package. """
pass
def lock_held(*args, **kwargs): # real signature unknown
"""
Return True if the import lock is currently held, else False.
On platforms without threads, return False.
"""
pass
def release_lock(*args, **kwargs): # real signature unknown
"""
Release the interpreter's import lock.
On platforms without threads, this function does nothing.
"""
pass
def _fix_co_filename(*args, **kwargs): # real signature unknown
"""
Changes code.co_filename to specify the passed-in file path.
code
Code object to change.
path
File path to use.
"""
pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7ffb5f337048>, 'find_spec': <classmethod object at 0x7ffb5f337080>, 'find_module': <classmethod object at 0x7ffb5f3370b8>, 'create_module': <classmethod object at 0x7ffb5f3370f0>, 'exec_module': <classmethod object at 0x7ffb5f337128>, 'get_code': <classmethod object at 0x7ffb5f337198>, 'get_source': <classmethod object at 0x7ffb5f337208>, 'is_package': <classmethod object at 0x7ffb5f337278>, 'load_module': <classmethod object at 0x7ffb5f3372b0>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
__spec__ = None # (!) real value is "ModuleSpec(name='_imp', loader=<class '_frozen_importlib.BuiltinImporter'>)"
| [
"[email protected]"
] | |
0b7f0957af77abd693ef66d5807db552ae045132 | 6571b77f6e6f37d6df91a9cf0c34297a2bee1eb9 | /site-packages/ansible/modules/extras/packaging/os/homebrew_cask.py | b480be90eb5fc36e8be1a87d11e8d55d9cabd523 | [
"Apache-2.0"
] | permissive | suntao789/Aclsm | ec02a04bb3ba14a1ea6a6c82a325da59d192d0f7 | 2202201c8279391386a4569e69f93d90eca5b96a | refs/heads/master | 2020-04-01T22:39:02.140836 | 2018-10-19T03:49:14 | 2018-10-19T03:49:14 | 153,719,890 | 0 | 0 | Apache-2.0 | 2018-10-19T03:11:46 | 2018-10-19T03:11:46 | null | UTF-8 | Python | false | false | 16,000 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: homebrew_cask
author: Daniel Jaouen
short_description: Install/uninstall homebrew casks.
description:
- Manages Homebrew casks.
version_added: "1.6"
options:
name:
description:
- name of cask to install/remove
required: true
state:
description:
- state of the cask
    choices: [ 'present', 'installed', 'absent', 'removed', 'uninstalled' ]
required: false
default: present
'''
EXAMPLES = '''
- homebrew_cask: name=alfred state=present
- homebrew_cask: name=alfred state=absent
'''
import os.path
import re
# exceptions -------------------------------------------------------------- {{{
class HomebrewCaskException(Exception):
pass
# /exceptions ------------------------------------------------------------- }}}
# utils ------------------------------------------------------------------- {{{
def _create_regex_group(s):
lines = (line.strip() for line in s.split('\n') if line.strip())
chars = filter(None, (line.split('#')[0].strip() for line in lines))
group = r'[^' + r''.join(chars) + r']'
return re.compile(group)
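# Illustrative: for VALID_CASK_CHARS this builds roughly the pattern r'[^\w.\+-]',
# i.e. it matches any character that is *not* permitted in a cask token.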
# /utils ------------------------------------------------------------------ }}}
class HomebrewCask(object):
'''A class to manage Homebrew casks.'''
# class regexes ------------------------------------------------ {{{
VALID_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
: # colons
. # dots
{sep} # the OS-specific path separator
- # dashes
'''.format(sep=os.path.sep)
VALID_BREW_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
. # dots
{sep} # the OS-specific path separator
- # dashes
'''.format(sep=os.path.sep)
VALID_CASK_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
. # dots
\+ # plusses
- # dashes
'''
INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS)
# /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
@classmethod
def valid_path(cls, path):
'''
`path` must be one of:
- list of paths
- a string containing only:
- alphanumeric characters
- dashes
- spaces
- colons
- os.path.sep
'''
if isinstance(path, basestring):
return not cls.INVALID_PATH_REGEX.search(path)
try:
iter(path)
except TypeError:
return False
else:
paths = path
return all(cls.valid_brew_path(path_) for path_ in paths)
@classmethod
def valid_brew_path(cls, brew_path):
'''
`brew_path` must be one of:
- None
- a string containing only:
- alphanumeric characters
- dashes
- spaces
- os.path.sep
'''
if brew_path is None:
return True
return (
isinstance(brew_path, basestring)
and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
)
@classmethod
def valid_cask(cls, cask):
'''A valid cask is either None or alphanumeric + backslashes.'''
if cask is None:
return True
return (
isinstance(cask, basestring)
and not cls.INVALID_CASK_REGEX.search(cask)
)
@classmethod
def valid_state(cls, state):
'''
A valid state is one of:
- installed
- absent
'''
if state is None:
return True
else:
return (
isinstance(state, basestring)
and state.lower() in (
'installed',
'absent',
)
)
@classmethod
def valid_module(cls, module):
'''A valid module is an instance of AnsibleModule.'''
return isinstance(module, AnsibleModule)
# /class validations ------------------------------------------- }}}
# class properties --------------------------------------------- {{{
@property
def module(self):
return self._module
@module.setter
def module(self, module):
if not self.valid_module(module):
self._module = None
self.failed = True
self.message = 'Invalid module: {0}.'.format(module)
raise HomebrewCaskException(self.message)
else:
self._module = module
return module
@property
def path(self):
return self._path
@path.setter
def path(self, path):
if not self.valid_path(path):
self._path = []
self.failed = True
self.message = 'Invalid path: {0}.'.format(path)
raise HomebrewCaskException(self.message)
else:
if isinstance(path, basestring):
self._path = path.split(':')
else:
self._path = path
return path
@property
def brew_path(self):
return self._brew_path
@brew_path.setter
def brew_path(self, brew_path):
if not self.valid_brew_path(brew_path):
self._brew_path = None
self.failed = True
self.message = 'Invalid brew_path: {0}.'.format(brew_path)
raise HomebrewCaskException(self.message)
else:
self._brew_path = brew_path
return brew_path
@property
def params(self):
return self._params
@params.setter
def params(self, params):
self._params = self.module.params
return self._params
@property
def current_cask(self):
return self._current_cask
@current_cask.setter
def current_cask(self, cask):
if not self.valid_cask(cask):
self._current_cask = None
self.failed = True
self.message = 'Invalid cask: {0}.'.format(cask)
raise HomebrewCaskException(self.message)
else:
self._current_cask = cask
return cask
# /class properties -------------------------------------------- }}}
def __init__(self, module, path=None, casks=None, state=None):
self._setup_status_vars()
self._setup_instance_vars(module=module, path=path, casks=casks,
state=state)
self._prep()
# prep --------------------------------------------------------- {{{
def _setup_status_vars(self):
self.failed = False
self.changed = False
self.changed_count = 0
self.unchanged_count = 0
self.message = ''
def _setup_instance_vars(self, **kwargs):
for key, val in kwargs.iteritems():
setattr(self, key, val)
def _prep(self):
self._prep_path()
self._prep_brew_path()
def _prep_path(self):
if not self.path:
self.path = ['/usr/local/bin']
def _prep_brew_path(self):
if not self.module:
self.brew_path = None
self.failed = True
self.message = 'AnsibleModule not set.'
raise HomebrewCaskException(self.message)
self.brew_path = self.module.get_bin_path(
'brew',
required=True,
opt_dirs=self.path,
)
if not self.brew_path:
self.brew_path = None
self.failed = True
self.message = 'Unable to locate homebrew executable.'
raise HomebrewCaskException('Unable to locate homebrew executable.')
return self.brew_path
def _status(self):
return (self.failed, self.changed, self.message)
# /prep -------------------------------------------------------- }}}
def run(self):
try:
self._run()
except HomebrewCaskException:
pass
if not self.failed and (self.changed_count + self.unchanged_count > 1):
self.message = "Changed: %d, Unchanged: %d" % (
self.changed_count,
self.unchanged_count,
)
(failed, changed, message) = self._status()
return (failed, changed, message)
# checks ------------------------------------------------------- {{{
def _current_cask_is_installed(self):
if not self.valid_cask(self.current_cask):
self.failed = True
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
cmd = [self.brew_path, 'cask', 'list']
rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0])
if 'nothing to list' in err:
return False
elif rc == 0:
casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()]
return self.current_cask in casks
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
# /checks ------------------------------------------------------ }}}
# commands ----------------------------------------------------- {{{
def _run(self):
if self.state == 'installed':
return self._install_casks()
elif self.state == 'absent':
return self._uninstall_casks()
if self.command:
return self._command()
# updated -------------------------------- {{{
def _update_homebrew(self):
rc, out, err = self.module.run_command([
self.brew_path,
'update',
], path_prefix=self.path[0])
if rc == 0:
if out and isinstance(out, basestring):
already_updated = any(
re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
for s in out.split('\n')
if s
)
if not already_updated:
self.changed = True
self.message = 'Homebrew updated successfully.'
else:
self.message = 'Homebrew already up-to-date.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
# /updated ------------------------------- }}}
# installed ------------------------------ {{{
def _install_current_cask(self):
if not self.valid_cask(self.current_cask):
self.failed = True
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
if self._current_cask_is_installed():
self.unchanged_count += 1
self.message = 'Cask already installed: {0}'.format(
self.current_cask,
)
return True
if self.module.check_mode:
self.changed = True
self.message = 'Cask would be installed: {0}'.format(
self.current_cask
)
raise HomebrewCaskException(self.message)
cmd = [opt
for opt in (self.brew_path, 'cask', 'install', self.current_cask)
if opt]
rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0])
if self._current_cask_is_installed():
self.changed_count += 1
self.changed = True
self.message = 'Cask installed: {0}'.format(self.current_cask)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
def _install_casks(self):
for cask in self.casks:
self.current_cask = cask
self._install_current_cask()
return True
# /installed ----------------------------- }}}
# uninstalled ---------------------------- {{{
def _uninstall_current_cask(self):
if not self.valid_cask(self.current_cask):
self.failed = True
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
if not self._current_cask_is_installed():
self.unchanged_count += 1
self.message = 'Cask already uninstalled: {0}'.format(
self.current_cask,
)
return True
if self.module.check_mode:
self.changed = True
self.message = 'Cask would be uninstalled: {0}'.format(
self.current_cask
)
raise HomebrewCaskException(self.message)
cmd = [opt
for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask)
if opt]
rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0])
if not self._current_cask_is_installed():
self.changed_count += 1
self.changed = True
self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
def _uninstall_casks(self):
for cask in self.casks:
self.current_cask = cask
self._uninstall_current_cask()
return True
# /uninstalled ----------------------------- }}}
# /commands ---------------------------------------------------- }}}
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=["cask"], required=False),
path=dict(required=False),
state=dict(
default="present",
choices=[
"present", "installed",
"absent", "removed", "uninstalled",
],
),
),
supports_check_mode=True,
)
p = module.params
if p['name']:
casks = p['name'].split(',')
else:
casks = None
path = p['path']
if path:
path = path.split(':')
else:
path = ['/usr/local/bin']
state = p['state']
if state in ('present', 'installed'):
state = 'installed'
if state in ('absent', 'removed', 'uninstalled'):
state = 'absent'
brew_cask = HomebrewCask(module=module, path=path, casks=casks,
state=state)
(failed, changed, message) = brew_cask.run()
if failed:
module.fail_json(msg=message)
else:
module.exit_json(changed=changed, msg=message)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| [
"[email protected]"
] | |
c87a9b71d55168e5522480af101d1447f3f937bb | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_146/ch39_2020_04_13_14_51_58_180379.py | 17ddbc5af6d01d87824c7162766fd8f3643c4f1e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | def collatz(numero):
print(numero)
if numero == 1:
return
if numero % 2 == 0:
        collatz(numero // 2)  # floor division keeps the sequence in integers
else:
collatz(3 * numero + 1)
collatz(50) | [
"[email protected]"
] | |
0206e2fff226b78430fe479874f702f9fed0549a | 8b36013b62e5c39772c7d84444916fa0daec2783 | /flypy/typing.py | 953170734956b9e840e74adc4db3517a18ce9ea7 | [
"BSD-2-Clause"
] | permissive | filmackay/flypy | cc7cfad447905ecd2211ab462ccc9ca6e0b469a5 | 1fd06e2d4189d3355fa0e8c1a66657c5423591b4 | refs/heads/master | 2021-01-17T17:10:44.892002 | 2014-03-18T15:44:39 | 2014-03-18T15:44:39 | 17,741,144 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,025 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import re
import sys
from pykit.utils import hashable
import datashape as ds
from datashape import (TypeVar, TypeConstructor, dshape,
coercion_cost as coerce, unify as blaze_unify,
free, TypeSet)
from datashape.error import UnificationError, CoercionError
__all__ = [
'TypeVar', 'TypeConstructor', 'dshape', 'coerce', 'blaze_unify',
'free', 'TypeSet', 'UnificationError',
]
#===------------------------------------------------------------------===
# Parsing
#===------------------------------------------------------------------===
def parse(s):
if s[0].isupper() and re.match('\w+$', s): # HACK
return TypeConstructor(s, 0, [])
return dshape(s)
def typemap():
from . import types
_blaze2flypy = {
ds.void : types.void,
ds.char : types.char,
ds.bool_ : types.bool_,
ds.int8 : types.int8,
ds.int16 : types.int16,
ds.int32 : types.int32,
ds.int64 : types.int64,
ds.uint8 : types.uint8,
ds.uint16 : types.uint16,
ds.uint32 : types.uint32,
ds.uint64 : types.uint64,
ds.float32 : types.float32,
ds.float64 : types.float64,
ds.complex64: types.complex64,
ds.complex128: types.complex128,
}
return _blaze2flypy
# TODO: implement our own typing rules
def resolve_type(t):
_blaze2flypy = typemap()
return ds.tmap(lambda x: _blaze2flypy.get(x, x), t)
def to_blaze(t):
replacements = dict((v, k) for k, v in typemap().items())
return ds.tmap(lambda x: replacements.get(x, x), t)
def unify(constraints, concrete=True):
"""
Unify a set of constraints. If `concrete` is set to True, the result
may not have any remaining free variables.
"""
cs = [(to_blaze(left), to_blaze(right)) for left, right in constraints]
result, remaining_constraints = blaze_unify(cs)
if concrete:
#if remaining:
# raise TypeError("Result is not concrete after unification")
for result_type in result:
if free(result_type):
raise TypeError(
"Result type stil has free variables: %s" % (result_type,))
return [resolve_type(t) for t in result]
#===------------------------------------------------------------------===
# Runtime
#===------------------------------------------------------------------===
@property
def bound(self):
freevars = free(self.impl.type)
# assert len(freevars) == len(key)
# TODO: Parameterization by type terms
return dict((t.symbol, v) for t, v in zip(freevars, self.parameters))
class MetaType(type):
"""
Type of types.
Attributes:
layout: {str: Type}
Layout of the type
fields: {str: FunctionWrapper}
Dict of methods
"""
_is_flypy_class = True
def __init__(self, name, bases, dct):
if 'type' not in dct:
return
type = dct['type']
self.layout = layout = dict(getattr(self, 'layout', {}))
# Set method fields
self.fields = fields = dict(_extract_fields(type, dct))
# Verify signatures
#for func in self.fields.values():
# verify_method_signature(type, func.signature)
# Construct layout
for attr, t in layout.items():
if isinstance(t, str):
layout[attr] = parse(t)
# Patch concrete type with fields, layout
type_constructor = type.__class__
type_constructor.impl = self
type_constructor.fields = fields
type_constructor.layout = layout
type_constructor.bound = bound
@property
def resolved_layout(self):
return dict((n, resolve_simple(self, t)) for n, t in layout.items())
type_constructor.resolved_layout = resolved_layout
modname = dct['__module__']
module = sys.modules.get(modname)
type_constructor.scope = vars(module) if module else {}
def __getitem__(self, key):
if not isinstance(key, tuple):
key = (key,)
# Construct concrete type
constructor = type(self.type)
result = constructor(*key)
return result
#===------------------------------------------------------------------===
# Utils
#===------------------------------------------------------------------===
def _extract_fields(type, dct):
from .functionwrapper import FunctionWrapper # circular...
from . import typing
fields = {}
for name, value in dct.items():
if isinstance(value, FunctionWrapper):
fields[name] = value
# TODO: layout...
return fields
def verify_method_signature(type, signature):
"""Verify a method signature in the context of the defining type"""
typebound = set([t.symbol for t in free(type)])
sigbound = set([t.symbol for argtype in signature.argtypes
for t in free(argtype)])
for t in free(signature.restype):
if t.symbol not in typebound and t.symbol not in sigbound:
raise TypeError("Type variable %s is not bound by the type or "
"argument types" % (t,))
#===------------------------------------------------------------------===
# Unification and type resolution
#===------------------------------------------------------------------===
def lookup_builtin_type(name):
from . import types
builtin_scope = {
'Function': types.Function,
'Pointer': types.Pointer,
'Bool': types.Bool,
'Int': types.Int,
'Float': types.Float,
'Void': types.Void,
}
return builtin_scope.get(name)
def resolve_in_scope(ty, scope):
"""
Resolve a parsed type in the current scope. For example, if we parse
Foo[X], look up Foo in the current scope and reconstruct it with X.
"""
def resolve(t):
if isinstance(type(t), TypeConstructor):
name = type(t).name
# Get the @jit class (e.g. Int)
if hasattr(t, 'impl'):
impl = t.impl # already resolved!
else:
impl = scope.get(name) or lookup_builtin_type(name)
if impl is None:
raise TypeError(
"Type constructor %r is not in the current scope" % (name,))
# Get the TypeConstructor for the @jit class (e.g.
# Int[nbits, unsigned])
ctor = impl.type.__class__
return ctor(*t.parameters)
elif isinstance(t, TypeVar) and t.symbol[0].isupper():
# Resolve bare types, e.g. a name like 'NoneType' is parsed as a
# TypeVar
if t.symbol == 'NoneType':
assert t.symbol in scope
if scope.get(t.symbol):
cls = scope[t.symbol]
return cls[()]
return t
return t
freevars = dict((v.symbol, v) for v in free(ty))
return ds.tmap(resolve, ty)
def substitute(solution, t):
"""
Substitute bound parameters for the corresponding free variables
"""
def f(t):
if isinstance(t, TypeVar):
return solution.get(t.symbol, t)
return t
return ds.tmap(f, t)
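# Sketch (hypothetical types): with solution {'T': int32}, substituting into a
# type such as Pointer[T] yields Pointer[int32]; unbound variables pass through.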
def resolve(type, scope, bound):
"""
Resolve a parsed flypy type in its scope.
Do this before applying unification.
"""
type = resolve_type(type)
type = resolve_in_scope(type, scope)
type = substitute(bound, type)
if isinstance(type, ds.DataShape) and not type.shape: # HACK
type = type.measure
return type
def resolve_simple(defining_type, type):
"""
Resolve type `type` with respect to the scope and bound variables of
`defining_type`.
E.g. if we have
class C(object):
layout = [('x', 'B[int32]')]
we must resolve B as a class in the scope `C` is defined in.
"""
return resolve(type, defining_type.scope, defining_type.bound)
def can_coerce(src_type, dst_type):
"""
Check whether we can coerce a value of type `src_type` to a value
of type `dst_type`
"""
try:
coerce(to_blaze(src_type), to_blaze(dst_type))
except CoercionError:
return False
else:
return True
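# Illustrative (hypothetical types): can_coerce(types.int32, types.int64) should
# hold, while an impossible coercion makes coerce() raise and this returns False.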
#===------------------------------------------------------------------===
# Registry
#===------------------------------------------------------------------===
class OverlayRegistry(object):
def __init__(self):
self.overlays = {} # builtin -> flypy function
def overlay(self, pyfunc, flypyfunc):
assert pyfunc not in self.overlays, pyfunc
self.overlays[pyfunc] = flypyfunc
def lookup_overlay(self, pyfunc):
if not hashable(pyfunc):
return None
return self.overlays.get(pyfunc)
overlay_registry = OverlayRegistry()
overlay = overlay_registry.overlay | [
"[email protected]"
] | |
3ac41dad4d1438d2d9107b3e37b52d502857cc07 | 116acf603f5db8d626247355bf786c339ba95ea9 | /sendmsg/aliyun/aliyunsdkcore/auth/rpc_signature_composer.py | d5491de52ef7e3c4dd6ffb21d77def0155c0c21c | [] | no_license | dahunuaa/ZhihuiSMB_python3 | 0857afeec2337b44571986a9c70c26e716142ccb | 8db2708efccd5eefa393738500e326bd7fb65c21 | refs/heads/master | 2021-01-25T14:32:32.201879 | 2018-03-11T05:59:10 | 2018-03-11T05:59:10 | 123,703,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,900 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#coding=utf-8
__author__ = 'alex jiang'
import os
import sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
from . import sha_hmac1 as mac1
import urllib
import urllib.parse
import urllib.request
def __init__():
pass
# this function will append the necessary parameters for signer process.
# parameters: the orignal parameters
# signer: sha_hmac1 or sha_hmac256
# accessKeyId: this is aliyun_access_key_id
# format: XML or JSON
def __refresh_sign_parameters(parameters, access_key_id, accept_format="JSON", signer=mac1):
if parameters is None or not isinstance(parameters, dict):
parameters = dict()
parameters["Timestamp"] = helper.get_iso_8061_date()
parameters["SignatureMethod"] = signer.get_signer_name()
parameters["SignatureVersion"] = signer.get_singer_version()
parameters["SignatureNonce"] = helper.get_uuid()
parameters["AccessKeyId"] = access_key_id
if accept_format is not None:
parameters["Format"] = accept_format
return parameters
def __pop_standard_urlencode(query):
ret = urllib.parse.urlencode(query)
ret = ret.replace('+', '%20')
ret = ret.replace('*', '%2A')
ret = ret.replace('%7E', '~')
return ret
def __compose_string_to_sign(method, queries):
canonicalized_query_string = ""
sorted_parameters = sorted(queries.items(), key=lambda queries: queries[0])
string_to_sign = method + "&%2F&" + urllib.request.pathname2url(__pop_standard_urlencode(sorted_parameters))
return string_to_sign
def __get_signature(string_to_sign, secret, signer=mac1):
return signer.get_sign_string(string_to_sign, secret + '&')
def get_signed_url(params, ak, secret, accept_format, method, signer=mac1):
sign_params = __refresh_sign_parameters(params, ak, accept_format, signer)
string_to_sign = __compose_string_to_sign(method, sign_params)
signature = __get_signature(string_to_sign, secret, signer)
sign_params['Signature'] = signature
url = '/?' + __pop_standard_urlencode(sign_params)
return url
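# Minimal usage sketch (hypothetical credentials and parameters):
#   url = get_signed_url({'Action': 'DescribeRegions'}, 'myAccessKeyId',
#                        'mySecret', 'JSON', 'GET')
#   # -> '/?AccessKeyId=...&Signature=...&SignatureNonce=...&Timestamp=...'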
| [
"dahu yao"
] | dahu yao |
83fab9a5b14ab84b25fc69ef404b2b327393cf8e | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/wroscoe_donkey/donkey-master/scripts/upload.py | de1854714fd61e71babfcf07623b5a3b0f002c07 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 221 | py | import donkey as dk
dk.sessions.pickle_sessions(sessions_folder='/home/wroscoe/donkey_data/sessions/',
session_names=['f8'],
file_path='/home/wroscoe/f8.pkl') | [
"[email protected]"
] | |
f6dd5238086e238bdf7f038a4485afc984f0e1c1 | ee8fc61e653410c377dcc8e2e2652f82ab03fd0a | /scripts/mode_background.py | 2c05b7997c193b8b78548761a28f7fc111f3cbeb | [
"Apache-2.0"
] | permissive | alcinos/dps | f8b2360b55676db95aa6f717eca935a77e46eb3e | 5467db1216e9f9089376d2c71f524ced2382e4f6 | refs/heads/master | 2020-05-18T19:07:37.809901 | 2019-04-30T13:43:17 | 2019-04-30T13:43:17 | 184,602,771 | 0 | 0 | Apache-2.0 | 2019-05-02T15:09:36 | 2019-05-02T15:09:35 | null | UTF-8 | Python | false | false | 1,553 | py | import matplotlib.pyplot as plt
import numpy as np
import collections
from dps.utils import sha_cache, NumpySeed
from dps.datasets.atari import StaticAtariDataset
def compute_background(data, mode_threshold):
assert data.dtype == np.uint8
mask = np.zeros(data.shape[1:3])
background = np.zeros(data.shape[1:4])
for i in range(data.shape[1]):
for j in range(data.shape[2]):
print("Doing {}".format((i, j)))
channel = [tuple(cell) for cell in data[:, i, j, ...]]
counts = collections.Counter(channel)
mode, mode_count = counts.most_common(1)[0]
if mode_count / data.shape[0] > mode_threshold:
mask[i, j] = 1
                background[i, j, ...] = mode  # store the modal colour value, not its count
else:
mask[i, j] = 0
return mask, background
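# A pixel counts as background when its single most frequent value across all
# frames exceeds `mode_threshold` as a fraction of the samples.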
@sha_cache("compute_background")
def f(game, N, in_colour, threshold, seed):
print("Computing background...")
with NumpySeed(seed):
dset = StaticAtariDataset(game=game, after_warp=not in_colour)
X = dset.x
if N:
X = X[:N]
mask, background = compute_background(X, threshold)
return mask, background
game = "IceHockeyNoFrameskip-v4"
in_colour = False
N = 1000
threshold = 0.8
seed = 0
mask, background = f(game, N, in_colour, threshold, seed)
if not in_colour:
background = background[..., 0]
fig, axes = plt.subplots(1, 2)
axes[0].imshow(mask)
axes[0].set_title("Mask")
axes[1].imshow(background)
axes[1].set_title("Background")
plt.show()
| [
"[email protected]"
] | |
fc8c5da0960349390d61a0cf7d7984c855348067 | 31148947a504352ea624d74cf6afa6bd0058bd5d | /lando/k8s/tests/test_cluster.py | f1b30bf70ecc21ea8017de91d4028d422f65c943 | [
"MIT"
] | permissive | Duke-GCB/lando | bcdeb20b7fcd694ae1f2baf016859baaa443abdb | 2ee80c4e9a2bfeac28d876eff92ba2c23960405a | refs/heads/master | 2021-06-20T06:25:12.064415 | 2019-08-27T14:53:53 | 2019-08-27T14:53:53 | 74,495,649 | 1 | 1 | MIT | 2021-03-25T21:27:55 | 2016-11-22T17:11:22 | Python | UTF-8 | Python | false | false | 19,270 | py | from unittest import TestCase
from unittest.mock import patch, Mock, call
from lando.k8s.cluster import ClusterApi, AccessModes, Container, SecretVolume, SecretEnvVar, EnvVarSource, \
FieldRefEnvVar, VolumeBase, SecretVolume, PersistentClaimVolume, ConfigMapVolume, BatchJobSpec, \
ItemNotFoundException
from kubernetes import client
from dateutil.parser import parse
class TestClusterApi(TestCase):
def setUp(self):
self.cluster_api = ClusterApi(host='somehost', token='myToken', namespace='lando-job-runner', verify_ssl=False)
self.mock_core_api = Mock()
self.mock_batch_api = Mock()
self.cluster_api.core = self.mock_core_api
self.cluster_api.batch = self.mock_batch_api
def test_constructor(self):
configuration = self.cluster_api.api_client.configuration
self.assertEqual(configuration.host, 'somehost')
self.assertEqual(configuration.api_key, {"authorization": "Bearer myToken"})
self.assertEqual(configuration.verify_ssl, False)
def test_constructor_verify_with_ca(self):
cluster_api = ClusterApi(host='somehost', token='myToken', namespace='lando-job-runner',
verify_ssl=True, ssl_ca_cert='/tmp/myfile.crt')
configuration = cluster_api.api_client.configuration
self.assertEqual(configuration.verify_ssl, True)
self.assertEqual(configuration.ssl_ca_cert, '/tmp/myfile.crt')
def test_create_persistent_volume_claim(self):
resp = self.cluster_api.create_persistent_volume_claim(name='myvolume', storage_size_in_g=2,
storage_class_name='gluster',
labels={"bespin": "true"})
self.assertEqual(resp, self.mock_core_api.create_namespaced_persistent_volume_claim.return_value)
args, kwargs = self.mock_core_api.create_namespaced_persistent_volume_claim.call_args
namespace = args[0]
self.assertEqual(namespace, 'lando-job-runner')
pvc = args[1]
self.assertEqual(pvc.metadata.name, 'myvolume')
self.assertEqual(pvc.metadata.labels, {"bespin": "true"})
self.assertEqual(pvc.spec.access_modes, [AccessModes.READ_WRITE_MANY])
self.assertEqual(pvc.spec.resources.requests, {'storage': '2Gi'})
self.assertEqual(pvc.spec.storage_class_name, 'gluster')
def test_create_persistent_volume_claim_custom_access_mode(self):
resp = self.cluster_api.create_persistent_volume_claim(name='myvolume', storage_size_in_g=2,
storage_class_name='gluster',
access_modes=[AccessModes.READ_WRITE_ONCE])
self.assertEqual(resp, self.mock_core_api.create_namespaced_persistent_volume_claim.return_value)
args, kwargs = self.mock_core_api.create_namespaced_persistent_volume_claim.call_args
pvc = args[1]
self.assertEqual(pvc.spec.access_modes, [AccessModes.READ_WRITE_ONCE])
def test_delete_persistent_volume_claim(self):
self.cluster_api.delete_persistent_volume_claim(name='myvolume')
self.mock_core_api.delete_namespaced_persistent_volume_claim.assert_called_with(
'myvolume', 'lando-job-runner', client.V1DeleteOptions()
)
def test_create_secret(self):
resp = self.cluster_api.create_secret(name='mysecret', string_value_dict={
'password': 's3cr3t'
}, labels={"bespin": "true"})
self.assertEqual(resp, self.mock_core_api.create_namespaced_secret.return_value)
args, kwargs = self.mock_core_api.create_namespaced_secret.call_args
self.assertEqual(kwargs['namespace'], 'lando-job-runner')
self.assertEqual(kwargs['body'].metadata.name, 'mysecret')
self.assertEqual(kwargs['body'].metadata.labels, {"bespin": "true"})
self.assertEqual(kwargs['body'].string_data, {'password': 's3cr3t'})
def test_delete_secret(self):
self.cluster_api.delete_secret(name='mysecret')
self.mock_core_api.delete_namespaced_secret.assert_called_with(
'mysecret', 'lando-job-runner', body=client.V1DeleteOptions()
)
def test_create_job(self):
mock_batch_job_spec = Mock()
resp = self.cluster_api.create_job(name='myjob',
batch_job_spec=mock_batch_job_spec,
labels={"bespin": "true"})
self.assertEqual(resp, self.mock_batch_api.create_namespaced_job.return_value)
args, kwargs = self.mock_batch_api.create_namespaced_job.call_args
self.assertEqual(args[0], 'lando-job-runner')
self.assertEqual(args[1].metadata.name, 'myjob')
self.assertEqual(args[1].metadata.labels, {"bespin": "true"})
self.assertEqual(args[1].spec, mock_batch_job_spec.create.return_value)
@patch('lando.k8s.cluster.watch')
def test_wait_for_job_events(self, mock_watch):
callback = Mock()
mock_watch.Watch.return_value.stream.return_value = [
{'object': 'job1', 'type': 'ADDED'},
{'object': 'job2', 'type': 'ADDED'},
]
self.cluster_api.wait_for_job_events(callback)
callback.assert_has_calls([
call({'object': 'job1', 'type': 'ADDED'}),
call({'object': 'job2', 'type': 'ADDED'}),
])
args, kwargs = mock_watch.Watch.return_value.stream.call_args
self.assertEqual(args[1], 'lando-job-runner')
self.assertEqual(kwargs['label_selector'], None)
@patch('lando.k8s.cluster.watch')
def test_wait_for_job_events_with_label_selector(self, mock_watch):
callback = Mock()
mock_watch.Watch.return_value.stream.return_value = []
self.cluster_api.wait_for_job_events(Mock(), label_selector='name=mypod')
args, kwargs = mock_watch.Watch.return_value.stream.call_args
self.assertEqual(args[1], 'lando-job-runner')
self.assertEqual(kwargs['label_selector'], 'name=mypod')
def test_delete_job(self):
self.cluster_api.delete_job(name='myjob')
args, kwargs = self.mock_batch_api.delete_namespaced_job.call_args
self.assertEqual(args[0], 'myjob')
self.assertEqual(args[1], 'lando-job-runner')
self.assertEqual(kwargs['body'].propagation_policy, 'Background')
    def test_delete_job_custom_propagation_policy(self):
self.cluster_api.delete_job(name='myjob', propagation_policy='Foreground')
args, kwargs = self.mock_batch_api.delete_namespaced_job.call_args
self.assertEqual(kwargs['body'].propagation_policy, 'Foreground')
def test_create_config_map(self):
resp = self.cluster_api.create_config_map(name='myconfig',
data={'threads': 2},
labels={"bespin": "true"})
self.assertEqual(resp, self.mock_core_api.create_namespaced_config_map.return_value)
args, kwargs = self.mock_core_api.create_namespaced_config_map.call_args
self.assertEqual(args[0], 'lando-job-runner')
self.assertEqual(args[1].metadata.name, 'myconfig')
self.assertEqual(args[1].metadata.labels, {"bespin": "true"})
self.assertEqual(args[1].data, {'threads': 2})
def test_delete_config_map(self):
self.cluster_api.delete_config_map(name='myconfig')
args, kwargs = self.mock_core_api.delete_namespaced_config_map.call_args
self.assertEqual(args[0], 'myconfig')
self.assertEqual(args[1], 'lando-job-runner')
def test_read_pod_logs(self):
# Added _preload_content argument to allow fetching actual text instead of parsed
# based on https://github.com/kubernetes/kubernetes/issues/37881#issuecomment-264366664
resp = self.cluster_api.read_pod_logs('mypod')
log_stream = self.mock_core_api.read_namespaced_pod_log.return_value
self.assertEqual(resp, log_stream.read.return_value.decode.return_value)
log_stream.read.return_value.decode.assert_called_with('utf-8')
self.mock_core_api.read_namespaced_pod_log.assert_called_with('mypod', 'lando-job-runner',
_preload_content=False)
def test_list_pods(self):
resp = self.cluster_api.list_pods(label_selector='bespin=true')
self.mock_core_api.list_namespaced_pod.assert_called_with(
'lando-job-runner', label_selector='bespin=true'
)
mock_pod_list = self.mock_core_api.list_namespaced_pod.return_value
self.assertEqual(resp, mock_pod_list.items)
def test_list_persistent_volume_claims(self):
resp = self.cluster_api.list_persistent_volume_claims(label_selector='bespin=true')
self.mock_core_api.list_namespaced_persistent_volume_claim.assert_called_with(
'lando-job-runner', label_selector='bespin=true'
)
mock_pvc_list = self.mock_core_api.list_namespaced_persistent_volume_claim.return_value
self.assertEqual(resp, mock_pvc_list.items)
def test_list_jobs(self):
resp = self.cluster_api.list_jobs(label_selector='bespin=true')
self.mock_batch_api.list_namespaced_job.assert_called_with(
'lando-job-runner', label_selector='bespin=true'
)
mock_job_list = self.mock_batch_api.list_namespaced_job.return_value
self.assertEqual(resp, mock_job_list.items)
def test_list_config_maps(self):
resp = self.cluster_api.list_config_maps(label_selector='bespin=true')
self.mock_core_api.list_namespaced_config_map.assert_called_with(
'lando-job-runner', label_selector='bespin=true'
)
mock_config_map_list = self.mock_core_api.list_namespaced_config_map.return_value
self.assertEqual(resp, mock_config_map_list.items)
def test_read_job_logs(self):
mock_pod = Mock()
mock_pod.metadata.name = 'myjob-abcd'
self.cluster_api.get_most_recent_pod_for_job = Mock()
self.cluster_api.get_most_recent_pod_for_job.return_value = mock_pod
self.cluster_api.read_pod_logs = Mock()
logs = self.cluster_api.read_job_logs('myjob')
self.assertEqual(logs, self.cluster_api.read_pod_logs.return_value)
self.cluster_api.get_most_recent_pod_for_job.assert_called_with('myjob')
self.cluster_api.read_pod_logs.assert_called_with('myjob-abcd')
def test_get_most_recent_pod_for_job_name__no_pods_found(self):
self.cluster_api.list_pods = Mock()
self.cluster_api.list_pods.return_value = []
with self.assertRaises(ItemNotFoundException) as raised_exception:
self.cluster_api.get_most_recent_pod_for_job('myjob')
self.assertEqual(str(raised_exception.exception), 'No pods found with job name myjob.')
def test_get_most_recent_pod_for_job_name__finds_pod(self):
pod1 = Mock()
pod1.metadata.creation_timestamp = parse("2012-01-01 12:30:00")
pod2 = Mock()
pod2.metadata.creation_timestamp = parse("2012-01-01 12:50:00")
pod3 = Mock()
pod3.metadata.creation_timestamp = parse("2012-01-01 12:40:00")
self.cluster_api.list_pods = Mock()
self.cluster_api.list_pods.return_value = [
pod1, pod2, pod3
]
pod = self.cluster_api.get_most_recent_pod_for_job('myjob')
self.assertEqual(pod, pod2)
class TestContainer(TestCase):
def test_minimal_create(self):
container = Container(
name='mycontainer', image_name='someimage', command=['wc', '-l']
)
container_dict = container.create().to_dict()
self.assertEqual(container_dict['name'], 'mycontainer')
self.assertEqual(container_dict['image'], 'someimage')
self.assertEqual(container_dict['command'], ['wc', '-l'])
self.assertEqual(container_dict['args'], [])
def test_full_create(self):
container = Container(
name='mycontainer2',
image_name='someimage',
command=['wc'],
args=['-l'],
working_dir='/tmp/data',
env_dict={
'SOMEENV': 'SomeVal'
},
requested_cpu='2',
requested_memory='200M',
volumes=[
SecretVolume(name='mymountedvolume', mount_path='/secret', secret_name='mysecret')
]
)
container_dict = container.create().to_dict()
expected_container_dict = {
'name': 'mycontainer2',
'image': 'someimage',
'image_pull_policy': None,
'command': ['wc'],
'args': ['-l'],
'working_dir': '/tmp/data',
'env': [
{
'name': 'SOMEENV',
'value': 'SomeVal',
'value_from': None
}
],
'env_from': None,
'resources': {
'limits': None,
'requests': {
'cpu': '2',
'memory': '200M'
}
},
'volume_mounts': [
{
'mount_path': '/secret',
'mount_propagation': None,
'name': 'mymountedvolume',
'read_only': None,
'sub_path': None
}
],
'lifecycle': None,
'liveness_probe': None,
'ports': None,
'readiness_probe': None,
'security_context': None,
'stdin': None,
'stdin_once': None,
'termination_message_path': None,
'termination_message_policy': None,
'tty': None,
'volume_devices': None, }
self.assertEqual(container_dict, expected_container_dict)
def test_create_env(self):
container = Container(
name='mycontainer', image_name='someimage', command=['wc', '-l'],
env_dict={
'USERNAME': 'joe',
'PASSWORD': SecretEnvVar(name='mysecret', key='username')
}
)
env = container.create_env()
self.assertEqual(len(env), 2)
self.assertEqual(env[0].name, 'USERNAME')
self.assertEqual(env[0].value, 'joe')
self.assertEqual(env[0].value_from, None)
self.assertEqual(env[1].name, 'PASSWORD')
self.assertEqual(env[1].value, None)
self.assertEqual(env[1].value_from.to_dict()['secret_key_ref'],
{'key': 'username', 'name': 'mysecret', 'optional': None})
def test_create_resource_requirements(self):
container = Container(
name='mycontainer', image_name='someimage', command=['wc', '-l'],
requested_memory='200M', requested_cpu=4,
)
requirements = container.create_resource_requirements()
expected_requirements = {
'limits': None,
'requests': {
'cpu': 4,
'memory': '200M'
}
}
self.assertEqual(requirements.to_dict(), expected_requirements)
class TestEnvVarSource(TestCase):
def test_create_env_var_source_is_required(self):
with self.assertRaises(NotImplementedError):
EnvVarSource().create_env_var_source()
class TestSecretEnvVar(TestCase):
def test_create_env_var_source(self):
env_var = SecretEnvVar(name='mysecret', key='mykey')
env_var_source_dict = env_var.create_env_var_source().to_dict()
self.assertEqual(env_var_source_dict['secret_key_ref'],
{'key': 'mykey', 'name': 'mysecret', 'optional': None})
class TestFieldRefEnvVar(TestCase):
def test_create_env_var_source(self):
env_var = FieldRefEnvVar(field_path='metadata.name')
env_var_source_dict = env_var.create_env_var_source().to_dict()
self.assertEqual(env_var_source_dict['field_ref'],
{'api_version': None, 'field_path': 'metadata.name'})
class TestVolumeBase(TestCase):
def test_create_volume_mount(self):
volume = VolumeBase(name='myvolume', mount_path='/data')
volume_dict = volume.create_volume_mount().to_dict()
self.assertEqual(volume_dict, {
'mount_path': '/data',
'mount_propagation': None,
'name': 'myvolume',
'read_only': None,
'sub_path': None
})
def test_create_volume_is_required(self):
volume = VolumeBase(name='myvolume', mount_path='/data')
with self.assertRaises(NotImplementedError):
volume.create_volume()
class TestSecretVolume(TestCase):
def test_create_volume(self):
volume = SecretVolume(name='myvolume', mount_path='/data2', secret_name='mysecret')
volume_dict = volume.create_volume().to_dict()
self.assertEqual(volume_dict['secret']['secret_name'], 'mysecret')
class TestPersistentClaimVolume(TestCase):
def test_create_volume(self):
volume = PersistentClaimVolume(name='myvolume', mount_path='/data3', volume_claim_name='mypvc')
volume_dict = volume.create_volume().to_dict()
self.assertEqual(volume_dict['persistent_volume_claim'],
{'claim_name': 'mypvc', 'read_only': False})
class TestConfigMapVolume(TestCase):
def test_create_volume(self):
volume = ConfigMapVolume(name='myvolume', mount_path='/data/config.dat',
config_map_name='myconfig', source_key='datastore', source_path='config')
volume_dict = volume.create_volume().to_dict()
expected_dict = {
'default_mode': None,
'items': [{'key': 'datastore', 'mode': None, 'path': 'config'}],
'name': 'myconfig',
'optional': None
}
self.assertEqual(volume_dict['config_map'], expected_dict)
class TestBatchJobSpec(TestCase):
def test_create(self):
container = Container(
name='mycontainer', image_name='someimage', command=['wc', '-l']
)
spec = BatchJobSpec(name='mybatch', container=container, labels={'service': 'bespin'})
spec_dict = spec.create().to_dict()
self.assertEqual(spec_dict['template']['metadata']['name'], 'mybatchspec')
self.assertEqual(spec_dict['template']['spec']['containers'], [container.create().to_dict()])
def test_create_with_service_account_name(self):
container = Container(
name='mycontainer', image_name='someimage', command=['wc', '-l']
)
spec = BatchJobSpec(name='mybatch', container=container,
labels={'service': 'bespin'},
service_account_name='sa-name')
spec_dict = spec.create().to_dict()
self.assertEqual(spec_dict['template']['metadata']['name'], 'mybatchspec')
self.assertEqual(spec_dict['template']['spec']['containers'], [container.create().to_dict()])
self.assertEqual(spec_dict['template']['spec']['service_account_name'], 'sa-name')
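# Hedged usage sketch (not part of the test suite; only calls exercised by the
# tests above are used, and the label values are illustrative assumptions):
#
#     container = Container(name='job', image_name='someimage', command=['wc', '-l'])
#     spec = BatchJobSpec(name='mybatch', container=container, labels={'service': 'bespin'})
#     body = spec.create()          # kubernetes object; inspect via body.to_dict()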
| [
"[email protected]"
] | |
6cfac94954dfc1ed9b3f323587ba8366b3f4bc6c | 186158704058dcbeef84caf6d0fa220d127719dc | /bin/unzip-bpo.py | ff2a23ced8fa03791d5bbc10a1bf624b58af33ed | [] | no_license | davidmcclure/quotes | a18afc88315b3157ddb69f14ca0e8e69bdd6ff68 | 1460a732091afb5f39d484a4644e8c05dd1af201 | refs/heads/master | 2021-01-11T07:59:35.648905 | 2017-02-18T04:04:56 | 2017-02-18T04:04:56 | 72,132,830 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | #!/usr/bin/env python
from quotes.services import config
from quotes.jobs.unzip_bpo import UnzipBPO
if __name__ == '__main__':
job = UnzipBPO(corpus_dir=config['bpo_corpus_dir'])
job()
| [
"[email protected]"
] | |
b8ae8d56a11e5258bfb2c93b76c59497a3c5e236 | eb64b799ff1d7ef3a244bf8e6f9f4e9118d5cfcd | /homeassistant/components/p1_monitor/sensor.py | edc076382ec4043e5ea649afba8f4279c885c95d | [
"Apache-2.0"
] | permissive | JeffLIrion/home-assistant | 53966b81b5d5816679f12fc761f79e8777c738d6 | 8f4ec89be6c2505d8a59eee44de335abe308ac9f | refs/heads/dev | 2023-08-22T09:42:02.399277 | 2022-02-16T01:26:13 | 2022-02-16T01:26:13 | 136,679,169 | 5 | 2 | Apache-2.0 | 2023-09-13T06:59:25 | 2018-06-09T00:58:35 | Python | UTF-8 | Python | false | false | 10,435 | py | """Support for P1 Monitor sensors."""
from __future__ import annotations
from typing import Literal
from homeassistant.components.sensor import (
DOMAIN as SENSOR_DOMAIN,
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CURRENCY_EURO,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
POWER_WATT,
VOLUME_CUBIC_METERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import P1MonitorDataUpdateCoordinator
from .const import (
DOMAIN,
SERVICE_PHASES,
SERVICE_SETTINGS,
SERVICE_SMARTMETER,
SERVICES,
)
SENSORS: dict[
Literal["smartmeter", "phases", "settings"], tuple[SensorEntityDescription, ...]
] = {
SERVICE_SMARTMETER: (
SensorEntityDescription(
key="gas_consumption",
name="Gas Consumption",
entity_registry_enabled_default=False,
native_unit_of_measurement=VOLUME_CUBIC_METERS,
device_class=SensorDeviceClass.GAS,
state_class=SensorStateClass.TOTAL_INCREASING,
),
SensorEntityDescription(
key="power_consumption",
name="Power Consumption",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="energy_consumption_high",
name="Energy Consumption - High Tariff",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
SensorEntityDescription(
key="energy_consumption_low",
name="Energy Consumption - Low Tariff",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
SensorEntityDescription(
key="power_production",
name="Power Production",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="energy_production_high",
name="Energy Production - High Tariff",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
SensorEntityDescription(
key="energy_production_low",
name="Energy Production - Low Tariff",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
SensorEntityDescription(
key="energy_tariff_period",
name="Energy Tariff Period",
icon="mdi:calendar-clock",
),
),
SERVICE_PHASES: (
SensorEntityDescription(
key="voltage_phase_l1",
name="Voltage Phase L1",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="voltage_phase_l2",
name="Voltage Phase L2",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="voltage_phase_l3",
name="Voltage Phase L3",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="current_phase_l1",
name="Current Phase L1",
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
device_class=SensorDeviceClass.CURRENT,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="current_phase_l2",
name="Current Phase L2",
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
device_class=SensorDeviceClass.CURRENT,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="current_phase_l3",
name="Current Phase L3",
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
device_class=SensorDeviceClass.CURRENT,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="power_consumed_phase_l1",
name="Power Consumed Phase L1",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="power_consumed_phase_l2",
name="Power Consumed Phase L2",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="power_consumed_phase_l3",
name="Power Consumed Phase L3",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="power_produced_phase_l1",
name="Power Produced Phase L1",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="power_produced_phase_l2",
name="Power Produced Phase L2",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="power_produced_phase_l3",
name="Power Produced Phase L3",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
),
SERVICE_SETTINGS: (
SensorEntityDescription(
key="gas_consumption_price",
name="Gas Consumption Price",
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
native_unit_of_measurement=f"{CURRENCY_EURO}/{VOLUME_CUBIC_METERS}",
),
SensorEntityDescription(
key="energy_consumption_price_low",
name="Energy Consumption Price - Low",
state_class=SensorStateClass.MEASUREMENT,
native_unit_of_measurement=f"{CURRENCY_EURO}/{ENERGY_KILO_WATT_HOUR}",
),
SensorEntityDescription(
key="energy_consumption_price_high",
name="Energy Consumption Price - High",
state_class=SensorStateClass.MEASUREMENT,
native_unit_of_measurement=f"{CURRENCY_EURO}/{ENERGY_KILO_WATT_HOUR}",
),
SensorEntityDescription(
key="energy_production_price_low",
name="Energy Production Price - Low",
state_class=SensorStateClass.MEASUREMENT,
native_unit_of_measurement=f"{CURRENCY_EURO}/{ENERGY_KILO_WATT_HOUR}",
),
SensorEntityDescription(
key="energy_production_price_high",
name="Energy Production Price - High",
state_class=SensorStateClass.MEASUREMENT,
native_unit_of_measurement=f"{CURRENCY_EURO}/{ENERGY_KILO_WATT_HOUR}",
),
),
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up P1 Monitor Sensors based on a config entry."""
async_add_entities(
P1MonitorSensorEntity(
coordinator=hass.data[DOMAIN][entry.entry_id],
description=description,
service_key=service_key,
name=entry.title,
service=SERVICES[service_key],
)
for service_key, service_sensors in SENSORS.items()
for description in service_sensors
)
class P1MonitorSensorEntity(CoordinatorEntity, SensorEntity):
"""Defines an P1 Monitor sensor."""
coordinator: P1MonitorDataUpdateCoordinator
def __init__(
self,
*,
coordinator: P1MonitorDataUpdateCoordinator,
description: SensorEntityDescription,
service_key: Literal["smartmeter", "phases", "settings"],
name: str,
service: str,
) -> None:
"""Initialize P1 Monitor sensor."""
super().__init__(coordinator=coordinator)
self._service_key = service_key
self.entity_id = f"{SENSOR_DOMAIN}.{name}_{description.key}"
self.entity_description = description
self._attr_unique_id = (
f"{coordinator.config_entry.entry_id}_{service_key}_{description.key}"
)
self._attr_device_info = DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={
(DOMAIN, f"{coordinator.config_entry.entry_id}_{service_key}")
},
configuration_url=f"http://{coordinator.config_entry.data[CONF_HOST]}",
manufacturer="P1 Monitor",
name=service,
)
@property
def native_value(self) -> StateType:
"""Return the state of the sensor."""
value = getattr(
self.coordinator.data[self._service_key], self.entity_description.key
)
if isinstance(value, str):
return value.lower()
return value
| [
"[email protected]"
] | |
a0939899f8fceb40a4dd8a2deed214d8879e78cc | 0d8486c1d55c40bebea7c5428930f18165d2d0e9 | /tests/wasp1/AllAnswerSets/aggregates_count_propagation_3.test.py | 27e773802ad09d5c6d3abad9eda4941834b6af27 | [
"Apache-2.0"
] | permissive | bernardocuteri/wasp | 6f81bf6aa8fb273c91bbf68ecce4ecb195a55953 | 05c8f961776dbdbf7afbf905ee00fc262eba51ad | refs/heads/master | 2021-06-08T11:58:25.080818 | 2020-10-05T16:57:37 | 2020-10-05T16:57:37 | 124,245,808 | 0 | 0 | Apache-2.0 | 2018-03-07T14:13:16 | 2018-03-07T14:13:16 | null | UTF-8 | Python | false | false | 183 | py | input = """
a(1) v a(2).
a(3) v a(4).
ok :- not #count{T: a(T)} > 2, #count{V : a(V)} > 1.
"""
output = """
{a(1), a(3), ok}
{a(1), a(4), ok}
{a(2), a(3), ok}
{a(2), a(4), ok}
"""
| [
"[email protected]"
] | |
cc445242ba9b1c7f923e254cdb7bd7111f34354d | 5aec124d5c006fab649d562603e00cff5fc8eafb | /HCU-311-V1.0.1/sailing_robot_control/build/ball_detected/catkin_generated/pkg.develspace.context.pc.py | 64c847321c4ed3ddf0ed6b3f1d89f11041bd7378 | [] | no_license | supcon-nzic/HCU311_Sailing_Robot | d935e0e2383b0f17eeeec42a94adc027ba22a9f1 | e42d8c10455e6bf88c7a9e9a3be65b7afd12466e | refs/heads/master | 2021-05-21T08:36:59.693130 | 2020-05-27T02:29:56 | 2020-05-27T02:29:56 | 252,621,616 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/root/HCU-311/sailing_robot_control/devel/include".split(';') if "/root/HCU-311/sailing_robot_control/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ball_detected"
PROJECT_SPACE_DIR = "/root/HCU-311/sailing_robot_control/devel"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
8688ca373fba0c6353719eb5259f982eb9d99d70 | 00cd46c5722fbb4623d8cefc33bbce6e4c6bf970 | /BFS/120.Word Ladder/Solution_BFS.py | 5e0d498187e3ce628b96de5545b6fd6f4933da91 | [
"MIT"
] | permissive | jxhangithub/lintcode | 9126d0d951cdc69cd5f061799313f1a96ffe5ab8 | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | refs/heads/master | 2022-04-02T22:02:57.515169 | 2020-02-26T21:32:02 | 2020-02-26T21:32:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,350 | py | from collections import deque
class Solution:
"""
@param: start: a string
@param: end: a string
@param: dict: a set of string
@return: An integer
"""
def ladderLength(self, start, end, dictionary):
# write your code here
if start == end:
return 1
dictionary.add(end)
steps = {start:1}
queue = deque([start])
while queue:
word = queue.popleft()
if word == end:
return steps[word]
for candidate in self._next_words(word):
if candidate not in dictionary or candidate in steps:
continue
queue.append(candidate)
steps[candidate] = steps[word] + 1
return 0
# O(26 * L^2)
# L is the length of word
def _next_words(self, word):
words = []
for i in range(len(word)):
left, right = word[:i], word[i + 1:]
for char in 'abcdefghijklmnopqrstuvwxyz':
if word[i] == char:
continue
words.append(left + char + right)
return words
# Level-order (layered) BFS traversal
class Solution:
"""
@param: start: a string
@param: end: a string
@param: dict: a set of string
@return: An integer
"""
def ladderLength(self, start, end, dict):
dict.add(end)
queue = collections.deque([start])
visited = set([start])
distance = 0
while queue:
distance += 1
for i in range(len(queue)):
word = queue.popleft()
if word == end:
return distance
for next_word in self.get_next_words(word):
if next_word not in dict or next_word in visited:
continue
queue.append(next_word)
visited.add(next_word)
return 0
# O(26 * L^2)
# L is the length of word
def get_next_words(self, word):
words = []
for i in range(len(word)):
left, right = word[:i], word[i + 1:]
for char in 'abcdefghijklmnopqrstuvwxyz':
if word[i] == char:
continue
words.append(left + char + right)
return words
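# Hedged usage sketch (the word set is an illustrative assumption):
#
#     words = {"hot", "dot", "dog", "lot", "log"}
#     Solution().ladderLength("hit", "cog", words)   # -> 5 (hit-hot-dot-dog-cog)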
| [
"[email protected]"
] | |
98e0cfbb25ce168aee899791913abb63cb37db2f | 94e9bdf9a79b63d29f6d4cd0d299feaaaf2f346f | /tp/gremlin.py | c62a5cb9bba5e6e63ebcb0f3eff7f4f2f1872638 | [
"Apache-2.0"
] | permissive | BITPlan/pyjanusgraph | 797fe7940672bef939b6119727811fd99b6271ca | b7166250a96c5d1cc919a821269ca6740e50c510 | refs/heads/master | 2022-04-14T19:34:26.342483 | 2020-04-13T09:41:47 | 2020-04-13T09:41:47 | 255,103,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,841 | py | '''
Created on 2020-03-30
@author: wf
'''
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.structure.graph import Graph
from shutil import copyfile
import os
import csv
class RemoteGremlin(object):
'''
helper for remote gremlin connections
:ivar server: the server to connect to
:ivar port: the port to connect to
    :ivar sharepoint: the directory that is shared with the janusgraph instance e.g. via a docker bind/mount or volume
    :ivar sharepath: the path of the sharepoint as seen by the janusgraph server
'''
debug=False
def __init__(self, server='localhost', port=8182):
'''
construct me with the given server and port
Args:
server(str): the server to use
port(int): the port to use
'''
self.server=server
self.port=port
def setSharepoint(self,sharepoint,sharepath):
'''
set up a sharepoint
Args:
            sharepoint(str): the directory that is shared with the janusgraph instance e.g. via a docker bind/mount or volume
            sharepath(str): the path of the sharepoint as seen by the janusgraph server
'''
self.sharepoint=sharepoint
self.sharepath=sharepath
def share(self,file):
'''
share the given file and return the path as seen by the server
Args:
file(str): path to the file to share
'''
fbase=os.path.basename(file)
target=self.sharepoint+fbase
if RemoteGremlin.debug:
print("copying %s to %s" % (file,target))
copyfile(file,target)
return self.sharepath+fbase
def open(self):
'''
open the remote connection
Returns:
GraphTraversalSource: the remote graph traversal source
'''
self.graph = Graph()
self.url='ws://%s:%s/gremlin' % (self.server,self.port)
self.connection = DriverRemoteConnection(self.url, 'g')
# The connection should be closed on shut down to close open connections with connection.close()
self.g = self.graph.traversal().withRemote(self.connection)
return self.g
def close(self):
'''
close the remote connection
'''
self.connection.close()
def clean(self):
'''
clean the graph database by removing all vertices
'''
# drop the existing content of the graph
self.g.V().drop().iterate()
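# Hedged usage sketch for RemoteGremlin (host, port and paths are
# illustrative assumptions):
#
#     rg = RemoteGremlin(server='localhost', port=8182)
#     rg.setSharepoint('/local/share/', '/data/share/')
#     g = rg.open()                       # remote GraphTraversalSource
#     serverPath = rg.share('graph.xml')  # path as seen by the server
#     rg.close()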
class TinkerPopAble(object):
'''
mixin for classes to store and retrieve from tinkerpop graph database
'''
debug=False
def storeFields(self,fieldList):
'''
define the fields to be stored as tinkerpop vertice properties
Args:
fieldList(list): list of fields to be stored
'''
if not hasattr(self,'tpfields'):
self.tpfields={}
fields=vars(self)
for field in fieldList:
self.tpfields[field]=fields[field]
def toVertex(self,g):
'''
create a vertex from me
Args:
g(GraphTraversalSource): where to add me as a vertex
'''
label=type(self).__name__;
t=g.addV(label)
if TinkerPopAble.debug:
print(label)
tpfields=TinkerPopAble.fields(self)
for name,value in tpfields.items():
if TinkerPopAble.debug:
print("\t%s=%s" % (name,value))
if value is not None:
t=t.property(name,value)
t.iterate()
def fromMap(self,pMap):
'''
fill my attributes from the given pMap dict
Args:
            pMap(dict): the dict to fill my attributes from
'''
for name,value in pMap.items():
self.__setattr__(name, value[0]) #
@staticmethod
def fields(instance):
'''
Returns:
dict: either the vars of the instance or the fields specified by the tpfields attribute
'''
# if there is a pre selection of fields store only these
if hasattr(instance,'tpfields'):
tpfields=instance.tpfields
else:
# else use all fields
tpfields=vars(instance)
return tpfields
@staticmethod
def writeCSV(csvfileName,objectList,fieldnames=None):
'''
write the given objectList to a CSV file
Args:
csvfileName(str): the path for the CSV File to write to
objectList(list): a list of instances for which CSV lines should be created
fieldnames(list): an optional list of fieldnames - if set to None the fields will be derived from the first instance in the objectList
'''
if fieldnames is None:
if len(objectList)<1:
                raise ValueError("writeCSV needs at least one object in objectList when fieldnames are not specified")
headerInstance=objectList[0]
fieldnames=TinkerPopAble.fields(headerInstance).keys()
with open(csvfileName, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for instance in objectList:
rowdict={}
for fieldname in fieldnames:
fields=TinkerPopAble.fields(instance)
rowdict[fieldname]=fields[fieldname]
writer.writerow(rowdict)
@staticmethod
def cache(rg,gfile,clazz,objectList,initFunction):
'''
generic caching
Args:
gfile(str): the graph storage file
clazz(class): the class of the objects in the objectList
objectList(list): a list of instances to fill or read
initFunction(function): a function to call to fill the cache
'''
g=rg.g
cachefile=rg.sharepoint+gfile
clazzname=clazz.__name__
if os.path.isfile(cachefile):
g.io(rg.sharepath+gfile).read().iterate()
for pMap in g.V().hasLabel(clazzname).valueMap().toList():
if TinkerPopAble.debug:
print (pMap)
instance=clazz.ofMap(pMap)
objectList.append(instance)
if TinkerPopAble.debug:
print (instance)
else:
initFunction()
for instance in objectList:
if TinkerPopAble.debug:
print(instance)
instance.toVertex(g)
g.io(rg.sharepath+gfile).write().iterate()
return cachefile
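# Hedged usage sketch for TinkerPopAble (class and field names are
# illustrative assumptions):
#
#     class City(TinkerPopAble):
#         def __init__(self, name, pop):
#             self.name = name
#             self.pop = pop
#             self.storeFields(['name', 'pop'])
#
#     City('Berlin', 3600000).toVertex(g)            # g from RemoteGremlin.open()
#     TinkerPopAble.writeCSV('cities.csv', cityList) # cityList: list of City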
| [
"[email protected]"
] | |
b75a0c4586b96daeffc4f355e5573a95cc34c38e | 755384a965d9a8b781f1ad4f9f9656395ca9ac26 | /neurora/isc_cal.py | 4b3ea27865519084c9b74c94f0f3358ef6a92fe5 | [
"MIT"
] | permissive | guishuyunye-lyw/NeuroRA | 69134dd85d98328b250ef070f781ab8147763599 | 16bccb80d0b537c4a7c3574c296331a6a855feca | refs/heads/master | 2021-03-06T06:38:48.302269 | 2020-05-19T05:05:17 | 2020-05-19T05:05:17 | 246,186,748 | 3 | 0 | MIT | 2020-03-10T02:11:12 | 2020-03-10T02:11:11 | null | UTF-8 | Python | false | false | 8,480 | py | # -*- coding: utf-8 -*-
' a module for calculating the Inter Subject Correlation based on neural data '
__author__ = 'Zitong Lu'
import numpy as np
import math
from scipy.stats import pearsonr
np.seterr(divide='ignore', invalid='ignore')
' a function for calculating the inter subject correlation (ISC) '
def isc(data, time_win=5, time_step=5):
"""
Calculate the inter subject correlation (ISC)
Parameters
----------
data : array
The neural data.
The shape of data must be [n_subs, n_chls, n_ts]. n_subs, n_chls, n_ts represent the number of subjects, the
number of channels and the number of time-points.
time_win : int. Default is 5.
        Set a time-window for calculating the ISC for different time-points.
        If time_win=5, each calculation is based on 5 time-points.
    time_step : int. Default is 5.
        The step size, in time-points, between successive calculations.
Returns
-------
isc : array
The ISC.
The shape of isc is [n_subs!/(2!*(n_subs-2)!), n_chls, int((n_ts-time_win)/time_step)+1, 2]. n_subs, n_chls,
        n_ts represent the number of subjects, the number of channels and the number of time-points. 2 represents an
        r-value and a p-value.
"""
# get the number of subjects, channels, time-points
subs, chls, ts = np.shape(data)
# the time-points for calculating the ISC
ts = int((ts - time_win) / time_step) + 1
# the number of pairs among n_subs
if subs > 2:
n = int(math.factorial(subs)/(2*math.factorial(subs-2)))
if subs == 2:
n = 1
    # initialize the ISC array
    isc = np.zeros([n, chls, ts, 2], dtype=float)
nindex = 0
# calculate the ISC
for i in range(subs):
for j in range(subs):
if i < j:
for k in range(chls):
for l in range(ts):
rp = pearsonr(data[i, k, l*time_step:l*time_step+time_win], data[j, k, l*time_step:l*time_step+time_win])
isc[nindex, k, l] = rp
nindex = nindex + 1
return isc
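# Hedged usage sketch (random data; shapes are illustrative assumptions):
#
#     data = np.random.normal(0, 1, [8, 32, 100])  # 8 subs, 32 chls, 100 ts
#     result = isc(data, time_win=5, time_step=5)  # shape [28, 32, 20, 2]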
' a function for calculating the inter subject correlation (ISC) for fMRI (searchlight) '
def isc_fmri(fmri_data, ksize=[3, 3, 3], strides=[1, 1, 1]):
"""
Calculate the inter subject correlation (ISC) for fMRI (searchlight)
Parameters
----------
fmri_data : array
The fmri data.
The shape of fmri_data must be [n_ts, n_subs, nx, ny, nz]. n_ts, nx, ny, nz represent the number of time-points,
the number of subs & the size of fMRI-img, respectively.
ksize : array or list [kx, ky, kz]. Default is [3, 3, 3].
The size of the fMRI-img.
nx, ny, nz represent the number of voxels along the x, y, z axis.
strides : array or list [sx, sy, sz]. Default is [1, 1, 1].
The strides for calculating along the x, y, z axis.
Returns
-------
isc : array
The ISC.
The shape of isc is [n_ts, n_subs!/(2!*(n_subs-2)!), n_x, n_y, n_z, 2]. n_ts, n_subs, n_x, n_y, n_z represent
the number of time-points, the number of subjects, the number of calculation units for searchlight along the x,
        y, z axis. 2 represents an r-value and a p-value.
Notes
-----
The size of the calculation units should at least be [3, 3, 3].
"""
# get the number of time-points, subjects and the size of the fMRI-img
nts, nsubs, nx, ny, nz = np.shape(fmri_data)
# the size of the calculation units for searchlight
kx = ksize[0]
ky = ksize[1]
kz = ksize[2]
if kx+ky+kz < 9:
return print("The size of the calculation units is too small.")
# strides for calculating along the x, y, z axis
sx = strides[0]
sy = strides[1]
sz = strides[2]
# calculate the number of the calculation units
n_x = int((nx - kx) / sx) + 1
n_y = int((ny - ky) / sy) + 1
n_z = int((nz - kz) / sz) + 1
# initialize the data for calculating the ISC
data = np.full([nts, nsubs, n_x, n_y, n_z, kx * ky * kz], np.nan)
# assignment
for t in range(nts):
for sub in range(nsubs):
for x in range(n_x):
for y in range(n_y):
for z in range(n_z):
# record the index in a calculation unit
index = 0
for k1 in range(kx):
for k2 in range(ky):
for k3 in range(kz):
data[t, sub, x, y, z, index] = fmri_data[t, sub, x + k1, y + k2, z + k3]
index = index + 1
# the number of pairs among n_subs
if nsubs > 2:
n = int(math.factorial(nsubs) / (2 * math.factorial(nsubs - 2)))
if nsubs == 2:
n = 1
# initialize the ISC
subisc = np.full([nts, n, n_x, n_y, n_z, 2], np.nan)
# calculate the ISC
for t in range(nts):
nindex = 0
for i in range(nsubs):
for j in range(nsubs):
if i < j:
for x in range(n_x):
for y in range(n_y):
for z in range(n_z):
# no NaN
if (np.isnan(data[t, i, x, y, z]).any() == False) and (np.isnan(data[t, j, x, y, z]).any() == False):
# calculate the Pearson Coefficient and absolute the result
subisc[t, nindex, x, y, z] = pearsonr(data[t, i, x, y, z], data[t, j, x, y, z])
nindex = nindex + 1
return subisc
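# Hedged usage sketch (random data; shapes are illustrative assumptions):
#
#     fmri = np.random.normal(0, 1, [10, 4, 8, 8, 8])  # 10 ts, 4 subs, 8x8x8
#     r = isc_fmri(fmri, ksize=[3, 3, 3], strides=[1, 1, 1])
#     # r has shape [10, 6, 6, 6, 6, 2]: 4 subs -> 6 pairs, (8-3)/1+1 = 6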
' a function for calculating the inter subject correlation (ISC) for fMRI (for ROI) '
def isc_fmri_roi(fmri_data, mask_data):
"""
Calculate the inter subject correlation (ISC) for fMRI (for ROI)
Parameters
----------
fmri_data : array
The fmri data.
The shape of fmri_data must be [n_ts, n_subs, nx, ny, nz]. n_ts, nx, ny, nz represent the number of time-points,
the number of subs & the size of fMRI-img, respectively.
mask_data : array [nx, ny, nz].
The mask data for region of interest (ROI).
The size of the fMRI-img. nx, ny, nz represent the number of voxels along the x, y, z axis.
Returns
-------
isc : array
The ISC.
        The shape of isc is [n_ts, n_subs!/(2!*(n_subs-2)!), 2]. n_ts, n_subs represent the number of time-points
        and the number of subjects. 2 represents an r-value and a p-value.
"""
# get the number of time-points, subjects and the size of the fMRI-img
nts, nsubs, nx, ny, nz = np.shape(fmri_data)
# record the number of valid voxels in ROI
nmask = 0
for i in range(nx):
for j in range(ny):
for k in range(nz):
# not 0 or NaN
if (mask_data[i, j, k] != 0) and (math.isnan(mask_data[i, j, k]) == False):
nmask = nmask + 1
# initialize the data for calculating the ISC
data = np.full([nts, nsubs, nmask], np.nan)
# assignment
for t in range(nts):
for sub in range(nsubs):
# record the index of the valid voxels for calculating
n = 0
for i in range(nx):
for j in range(ny):
for k in range(nz):
# not 0 or NaN
if (mask_data[i, j, k] != 0) and (math.isnan(mask_data[i, j, k]) == False):
data[t, sub, n] = fmri_data[t, sub, i, j, k]
n = n + 1
# the number of pairs among n_subs
if nsubs > 2:
n = int(math.factorial(nsubs) / (2 * math.factorial(nsubs - 2)))
if nsubs == 2:
n = 1
# initialize the ISC
subisc = np.full([nts, n, 2], np.nan)
# calculate the ISC
for t in range(nts):
nindex = 0
for i in range(nsubs):
for j in range(nsubs):
if i < j:
if (np.isnan(data[t, i]).any() == False) and (np.isnan(data[t, j]).any() == False):
# calculate the Pearson Coefficient and absolute the result
subisc[t, nindex] = pearsonr(data[t, i], data[t, j])
nindex = nindex + 1
return subisc
| [
"[email protected]"
] | |
153d9680ab2bacbc55cde69f5591ff4263870773 | 8ac5fa643e54f6aac8af600eddbeb4c074095b2b | /tspdb/tests/testScriptMultiSynthControlSVD.py | ee629ebfbb382b0d0cbde15d0eeeb5174b607eb1 | [
"Apache-2.0"
] | permissive | AbdullahO/tspdb | 00acca3b29a054f02ac07a0243c04ba575af0a19 | 6ba75833a128d4036caad488d144fb6b0ba682e6 | refs/heads/master | 2023-05-25T09:44:19.856796 | 2023-05-18T17:25:48 | 2023-05-18T17:25:48 | 210,507,080 | 179 | 60 | Apache-2.0 | 2023-02-10T23:10:00 | 2019-09-24T03:51:25 | Jupyter Notebook | UTF-8 | Python | false | false | 12,144 | py | #############################################################
#
# Multi-Dimensional Robust Synthetic Control Tests
# (based on SVD)
#
# Generates two metrics and compares the RMSE of forecasts
# for each metric using RSC against mRSC.
#
# Tests are based on random data, so it is advised to run
# several times. Also note that in this setting RSC is
# also expected to do well. mRSC is expected to help but
# cannot be guaranteed to always be better.
#
# You need to ensure that this script is called from
# the tslib/ parent directory or tslib/tests/ directory:
#
# 1. python tests/testScriptMultiSynthControlSVD.py
# 2. python testScriptMultiSynthControlSVD.py
#
#############################################################
import sys, os
sys.path.append("../..")
sys.path.append("..")
sys.path.append(os.getcwd())
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import copy
from tslib.src import tsUtils
from tslib.src.synthcontrol.syntheticControl import RobustSyntheticControl
from tslib.src.synthcontrol.multisyntheticControl import MultiRobustSyntheticControl
def simpleFunctionOne(theta, rho):
alpha = 0.7
exp_term = np.exp((-1.0 *theta) - rho - (alpha * theta * rho))
exp_term2 = np.exp(-1.0 *alpha * theta * rho)
p = 10.0 * float(1.0 / (1.0 + exp_term)) + 10.0/exp_term2
return p
def simpleFunctionTwo(theta, rho):
alpha = 0.5
exp_term = np.exp((-1.0 *theta) - rho - (alpha * theta * rho))
p = 10.0 * float(1.0 / (1.0 + exp_term))
return p
def generateDataMatrix(N, T, rowRank, colRank, genFunction, rowParams, colParams):
matrix = np.zeros([N, T])
for i in range(0, N):
for j in range(0, T):
matrix[i, j] = genFunction(rowParams[i], colParams[j])
return matrix
def generateFirstRow(matrix, weights):
(N, T) = np.shape(matrix)
assert(len(weights) == N)
weights = weights.reshape([N, 1])
weights = weights/np.sum(weights)
return np.dot(weights.T, matrix)
def generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunction, trueWeights, rowParams, colParams):
matrix = generateDataMatrix(N, T, rowRank, colRank, genFunction, rowParams, colParams)
firstRow = generateFirstRow(matrix, trueWeights)
meanMatrix = np.zeros([N+1, T]) #np.concatenate([firstRow, matrix], axis=0)
meanMatrix[0, :] = firstRow
meanMatrix[1:, :] = matrix
#print(np.linalg.matrix_rank(meanMatrix))
noiseMatrix = np.random.normal(0.0, 1.0, [N+1, T])
#print(np.linalg.matrix_rank(noiseMatrix))
observationMatrix = meanMatrix + noiseMatrix
#print(np.linalg.matrix_rank(observationMatrix))
# convert to dataframes
trainingDict = {}
testDict = {}
meanTrainingDict = {}
meanTestDict = {}
for i in range(0, N+1):
trainingDict.update({str(i): observationMatrix[i, 0:TrainingEnd]})
meanTrainingDict.update({str(i): meanMatrix[i, 0:TrainingEnd]})
testDict.update({str(i): observationMatrix[i, TrainingEnd:]})
meanTestDict.update({str(i): meanMatrix[i, TrainingEnd:]})
trainDF = pd.DataFrame(data=trainingDict)
testDF = pd.DataFrame(data=testDict)
meanTrainDF = pd.DataFrame(data=meanTrainingDict)
meanTestDF = pd.DataFrame(data=meanTestDict)
#print(np.shape(trainDF), np.shape(testDF))
#print(np.shape(meanTrainDF), np.shape(meanTestDF))
return (observationMatrix, meanMatrix, trainDF, testDF, meanTrainingDict, meanTestDict)
def rankComparison():
N = 100
T = 120
TrainingEnd = 100
rowRank = 200
colRank = 200
# generate metric matrices
genFunctionOne = simpleFunctionOne
genFunctionTwo = simpleFunctionTwo
trueWeights = np.random.uniform(0.0, 1.0, N)
trueWeights = trueWeights/np.sum(trueWeights)
thetaArrayParams = np.random.uniform(0.0, 1.0, rowRank)
rhoArrayParams = np.random.uniform(0.0, 1.0, colRank)
rowParams = np.random.choice(thetaArrayParams, N)
colParams = np.random.choice(rhoArrayParams, T)
# metric 1
(observationMatrix, meanMatrix, trainDF, testDF, meanTrainingDict, meanTestDict) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionOne, trueWeights, rowParams, colParams)
# metric 2
(observationMatrix2, meanMatrix2, trainDF2, testDF2, meanTrainingDict2, meanTestDict2) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionTwo, trueWeights, rowParams, colParams)
thetaArrayParams = np.random.uniform(0.0, 1.0, rowRank)
rhoArrayParams = np.random.uniform(0.0, 1.0, colRank)
rowParams = np.random.choice(thetaArrayParams, N)
colParams = np.random.choice(rhoArrayParams, T)
# metric 3
(observationMatrix3, meanMatrix3, trainDF3, testDF3, meanTrainingDict3, meanTestDict3) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionTwo, trueWeights, rowParams, colParams)
# concatenation
matrixA = np.zeros([N+1, 2*T])
matrixA[:, 0:T] = meanMatrix
matrixA[:, T: ] = meanMatrix2
u, s, v = np.linalg.svd(meanMatrix, full_matrices=False)
u, s_, v = np.linalg.svd(meanMatrix2, full_matrices=False)
u, sA, v = np.linalg.svd(matrixA, full_matrices=False)
# print(np.linalg.matrix_rank(meanMatrix))
# print(np.linalg.matrix_rank(meanMatrix2))
# print(np.linalg.matrix_rank(meanMatrix3))
# print(np.linalg.matrix_rank(matrixA))
k = 20
plt.plot(range(0, k), s[0:k], color='magenta', label='metric1')
plt.plot(range(0, k), s_[0:k], color='black', label='metric2')
plt.plot(range(0, k), sA[0:k], '-x', color='red', label='combined')
plt.xlabel('Singular Value Index (largest to smallest)')
plt.ylabel('Singular Value')
plt.title('Diagnostic: Rank Preservation Property')
legend = plt.legend(loc='lower right', shadow=True)
plt.show()
def runAnalysis(N, T, TrainingEnd, rowRank, colRank):
# generate metric matrices
genFunctionOne = simpleFunctionOne
genFunctionTwo = simpleFunctionTwo
trueWeights = np.random.uniform(0.0, 1.0, N)
trueWeights = trueWeights/np.sum(trueWeights)
thetaArrayParams = np.random.uniform(0.0, 1.0, rowRank)
rhoArrayParams = np.random.uniform(0.0, 1.0, colRank)
rowParams = np.random.choice(thetaArrayParams, N)
colParams = np.random.choice(rhoArrayParams, T)
# metric 1
(observationMatrix1, meanMatrix1, trainDF1, testDF1, meanTrainingDict1, meanTestDict1) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionOne, trueWeights, rowParams, colParams)
# metric 2
(observationMatrix2, meanMatrix2, trainDF2, testDF2, meanTrainingDict2, meanTestDict2) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionTwo, trueWeights, rowParams, colParams)
keySeriesLabel = '0'
otherSeriesLabels = []
for ind in range(1, N+1):
otherSeriesLabels.append(str(ind))
# RSC analysis
singvals = 8
############################
#### RSC for metric 1
rscmodel1 = RobustSyntheticControl(keySeriesLabel, singvals, len(trainDF1), probObservation=1.0, svdMethod='numpy', otherSeriesKeysArray=otherSeriesLabels)
# fit the model
rscmodel1.fit(trainDF1)
predictionsRSC1 = rscmodel1.predict(testDF1)
rscRMSE1 = np.sqrt(np.mean((predictionsRSC1 - meanTestDict1[keySeriesLabel])**2))
#print("\n\n *** RSC rmse1:")
#print(rscRMSE1)
############################
##### RSC for metric 2
rscmodel2 = RobustSyntheticControl(keySeriesLabel, singvals, len(trainDF2), probObservation=1.0, svdMethod='numpy', otherSeriesKeysArray=otherSeriesLabels)
# fit the model
rscmodel2.fit(trainDF2)
predictionsRSC2 = rscmodel2.predict(testDF2)
rscRMSE2 = np.sqrt(np.mean((predictionsRSC2 - meanTestDict2[keySeriesLabel])**2))
#print("\n\n *** RSC rmse2:")
#print(rscRMSE2)
############################
#### multi RSC model (combined) --
relative_weights = [1.0, 1.0]
# instantiate the model
mrscmodel = MultiRobustSyntheticControl(2, relative_weights, keySeriesLabel, singvals, len(trainDF1), probObservation=1.0, svdMethod='numpy', otherSeriesKeysArray=otherSeriesLabels)
# fit
mrscmodel.fit([trainDF1, trainDF2])
# predict
combinedPredictionsArray = mrscmodel.predict([testDF1[otherSeriesLabels], testDF2[otherSeriesLabels]])
# split the predictions for the metrics
predictionsmRSC_1 = combinedPredictionsArray[0]
predictionsmRSC_2 = combinedPredictionsArray[1]
# compute RMSE
mrscRMSE1 = np.sqrt(np.mean((predictionsmRSC_1 - meanTestDict1[keySeriesLabel])**2))
mrscRMSE2 = np.sqrt(np.mean((predictionsmRSC_2 - meanTestDict2[keySeriesLabel])**2))
#print("\n\n *** mRSC rmse1:")
#print(mrscRMSE1)
#print("\n\n *** mRSC rmse2:")
#print(mrscRMSE1)
return ({"rsc1": rscRMSE1,
"rsc2": rscRMSE2,
"mrsc1": mrscRMSE1,
"mrsc2": mrscRMSE2})
def main():
# diagnostic test for rank preservation (see paper referenced)
rankComparison()
rowRank = 10
colRank = 10
rsc1 = []
rsc1A = []
rsc2 = []
rsc2A = []
mrsc1 = []
mrsc1A = []
mrsc2 = []
mrsc2A = []
# simulating many random tests and varying matrix sizes
N_array = [50, 100, 150, 200, 250, 300]
for N in N_array:
print("**********************************************************")
print(N)
print("**********************************************************")
for T in [30]:
TrainingEnd = int(0.75*T)
rsc1_array = []
rsc1A_array = []
rsc2_array = []
rsc2A_array = []
mrsc1_array = []
mrsc1A_array = []
mrsc2_array = []
mrsc2A_array = []
for iter in range(0, 20):
resDict = runAnalysis(N, T, TrainingEnd, rowRank, colRank)
rsc1_array.append(resDict['rsc1'])
rsc2_array.append(resDict['rsc2'])
mrsc1_array.append(resDict['mrsc1'])
mrsc2_array.append(resDict['mrsc2'])
rsc1.append(np.mean(rsc1_array))
rsc2.append(np.mean(rsc2_array))
mrsc1.append(np.mean(mrsc1_array))
mrsc2.append(np.mean(mrsc2_array))
print("====================================")
print("====================================")
print("Metric # 1:")
print("mRSC - RSC:")
for i in range(0, len(N_array)):
print(i, mrsc1[i] - rsc1[i])
print("Metric # 2:")
print("mRSC - RSC:")
for i in range(0, len(N_array)):
print(i, mrsc2[i] - rsc2[i])
print("====================================")
print("====================================")
print("====================================")
print("====================================")
print("Metric # 1:")
print("mRSC, RSC:")
for i in range(0, len(N_array)):
print(i, mrsc1[i], rsc1[i])
print("Metric # 2:")
print("mRSC, RSC,:")
for i in range(0, len(N_array)):
print(i, mrsc2[i], rsc2[i])
print("====================================")
print("====================================")
# plotting
plt.plot(N_array, mrsc1, color='r', label='mRSC (metricA)')
plt.plot(N_array, mrsc2, color='orange', label='mRSC (metricB)')
plt.plot(N_array, rsc1, '-.', color='blue', label='RSC (metricA)')
plt.plot(N_array, rsc2, '--x',color='magenta', label='RSC (metricB)')
plt.xlabel('N')
plt.ylabel('RMSE')
plt.title('mRSC vs RSC for metricA and metricB')
legend = plt.legend(loc='upper right', shadow=True)
plt.show()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
5fd64adbe388809df800e8d25b4768762ae01b4f | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC200~ABC299/ABC258/b.py | 9df401169330584353a01a0d5b334e77713e1f27 | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | n = int(input())
a = []
for _ in range(n):
a.append(list(input()))
ans = 0
# 8 direction vectors (row delta, column delta)
dx = [-1, -1, -1, 0, 0, 1, 1, 1]
dy = [-1, 0, 1, -1, 1, -1, 0, 1]
for i in range(n):
    for j in range(n):
        for k in range(8):
            tmp = ''
            nx = dx[k]
            ny = dy[k]
            # walk n steps with wrap-around, building an n-digit string;
            # after n steps (i, j) is back at its starting cell, so reusing
            # the loop variables is safe
            for m in range(n):
                tmp += a[i][j]
                i += nx
                j += ny
                i %= n
                j %= n
            ans = max(ans, int(tmp))
print(ans)
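# Hedged example (hand-checked, not an official sample):
#   n = 2, grid = ["12", "34"]  ->  prints 43
#   (start at the '4' and step left with wrap-around: '4' then '3')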
| [
"[email protected]"
] | |
599690c2819ae71b7e0abf4ea5cc3e5adad51af0 | c9c2643d5a548174c5a943765b1400b09e6850c6 | /dandi/tests/test_move.py | 70ead9312b61e6849c4439f10ad84d7a113a1561 | [
"Apache-2.0"
] | permissive | dandi/dandi-cli | c376b5b111fdeddbd0121c946babd6b18fed8550 | 1380865c8ef72b9f4fbc4b9b337267362bb1dbde | refs/heads/master | 2023-08-28T19:35:49.179955 | 2023-08-11T18:05:23 | 2023-08-11T18:05:23 | 202,796,357 | 16 | 17 | Apache-2.0 | 2023-09-12T00:00:47 | 2019-08-16T20:38:46 | Python | UTF-8 | Python | false | false | 32,817 | py | import logging
from pathlib import Path
from typing import Dict, List, Optional, cast
import pytest
from .fixtures import SampleDandiset
from ..dandiapi import RemoteAsset
from ..exceptions import NotFoundError
from ..move import AssetMismatchError, move
@pytest.fixture()
def moving_dandiset(new_dandiset: SampleDandiset) -> SampleDandiset:
for path in [
"file.txt",
"subdir1/apple.txt",
"subdir2/banana.txt",
"subdir2/coconut.txt",
"subdir3/red.dat",
"subdir3/green.dat",
"subdir3/blue.dat",
"subdir4/foo.json",
"subdir5/foo.json",
]:
p = new_dandiset.dspath / path
p.parent.mkdir(parents=True, exist_ok=True)
p.write_text(f"{path}\n")
new_dandiset.upload_kwargs["allow_any_path"] = True
new_dandiset.upload()
return new_dandiset
def check_assets(
sample_dandiset: SampleDandiset,
starting_assets: List[RemoteAsset],
work_on: str,
remapping: Dict[str, Optional[str]],
) -> None:
for asset in starting_assets:
if asset.path in remapping and remapping[asset.path] is None:
# Asset was overwritten
continue
if work_on in ("local", "both") and asset.path in remapping:
assert not (sample_dandiset.dspath / asset.path).exists()
assert (
sample_dandiset.dspath / cast(str, remapping[asset.path])
).read_text() == f"{asset.path}\n"
else:
assert (
sample_dandiset.dspath / asset.path
).read_text() == f"{asset.path}\n"
if work_on in ("remote", "both") and asset.path in remapping:
with pytest.raises(NotFoundError):
sample_dandiset.dandiset.get_asset_by_path(asset.path)
assert (
sample_dandiset.dandiset.get_asset_by_path( # type: ignore[attr-defined]
cast(str, remapping[asset.path])
).blob
== asset.blob # type: ignore[attr-defined]
)
else:
assert (
sample_dandiset.dandiset.get_asset_by_path(asset.path).identifier
== asset.identifier
)
@pytest.mark.parametrize(
"srcs,dest,regex,remapping",
[
(
["file.txt"],
"blob.dat",
False,
{"file.txt": "blob.dat"},
),
(
["file.txt"],
"blob.dat/",
False,
{"file.txt": "blob.dat/file.txt"},
),
(
["file.txt"],
"subdir1",
False,
{"file.txt": "subdir1/file.txt"},
),
(
["file.txt"],
"subdir1/",
False,
{"file.txt": "subdir1/file.txt"},
),
(
["subdir1/apple.txt"],
"subdir2",
False,
{"subdir1/apple.txt": "subdir2/apple.txt"},
),
(
["subdir2"],
"subdir1",
False,
{
"subdir2/banana.txt": "subdir1/subdir2/banana.txt",
"subdir2/coconut.txt": "subdir1/subdir2/coconut.txt",
},
),
(
["file.txt", "subdir2/banana.txt"],
"subdir1",
False,
{
"file.txt": "subdir1/file.txt",
"subdir2/banana.txt": "subdir1/banana.txt",
},
),
(
["file.txt", "subdir2/banana.txt"],
"newdir",
False,
{
"file.txt": "newdir/file.txt",
"subdir2/banana.txt": "newdir/banana.txt",
},
),
(
[r"\.dat$"],
".color",
True,
{
"subdir3/red.dat": "subdir3/red.color",
"subdir3/green.dat": "subdir3/green.color",
"subdir3/blue.dat": "subdir3/blue.color",
},
),
(
[r"^\w+?(\d+)/(.*)\.txt$"],
r"text\1/\2.txt",
True,
{
"subdir1/apple.txt": "text1/apple.txt",
"subdir2/banana.txt": "text2/banana.txt",
"subdir2/coconut.txt": "text2/coconut.txt",
},
),
(
["subdir1/apple.txt"],
".",
False,
{"subdir1/apple.txt": "apple.txt"},
),
],
)
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move(
monkeypatch: pytest.MonkeyPatch,
moving_dandiset: SampleDandiset,
srcs: List[str],
dest: str,
regex: bool,
remapping: Dict[str, Optional[str]],
work_on: str,
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
*srcs,
dest=dest,
regex=regex,
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
check_assets(moving_dandiset, starting_assets, work_on, remapping)
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_skip(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"file.txt",
"subdir4/foo.json",
dest="subdir5",
work_on=work_on,
existing="skip",
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
check_assets(
moving_dandiset, starting_assets, work_on, {"file.txt": "subdir5/file.txt"}
)
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
@pytest.mark.parametrize("kwargs", [{"existing": "error"}, {}])
def test_move_error(
monkeypatch: pytest.MonkeyPatch,
moving_dandiset: SampleDandiset,
work_on: str,
kwargs: Dict[str, str],
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
"file.txt",
"subdir4/foo.json",
dest="subdir5",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
**kwargs, # type: ignore[arg-type]
)
assert (
str(excinfo.value) == "Cannot move 'subdir4/foo.json' to 'subdir5/foo.json', as"
f" {'remote' if work_on == 'remote' else 'local'} destination already exists"
)
check_assets(moving_dandiset, starting_assets, work_on, {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_overwrite(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"file.txt",
"subdir4/foo.json",
dest="subdir5",
work_on=work_on,
existing="overwrite",
devel_debug=True,
dandi_instance=moving_dandiset.api.instance_id,
)
check_assets(
moving_dandiset,
starting_assets,
work_on,
{
"file.txt": "subdir5/file.txt",
"subdir4/foo.json": "subdir5/foo.json",
"subdir5/foo.json": None,
},
)
def test_move_no_srcs(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
dest="nowhere",
work_on="both",
dandi_instance=moving_dandiset.api.instance_id,
)
assert str(excinfo.value) == "No source paths given"
check_assets(moving_dandiset, starting_assets, "both", {})
def test_move_regex_multisrcs(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
r"\.txt",
r"\.dat",
dest=".blob",
regex=True,
work_on="both",
dandi_instance=moving_dandiset.api.instance_id,
)
assert (
str(excinfo.value) == "Cannot take multiple source paths when `regex` is True"
)
check_assets(moving_dandiset, starting_assets, "both", {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_multisrcs_file_dest(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
"file.txt",
"subdir1/apple.txt",
dest="subdir2/banana.txt",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
)
assert (
str(excinfo.value)
== "Cannot take multiple source paths when destination is a file"
)
check_assets(moving_dandiset, starting_assets, work_on, {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_folder_src_file_dest(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
"subdir1",
dest="subdir2/banana.txt",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
)
assert str(excinfo.value) == "Cannot move folder 'subdir1' to a file path"
check_assets(moving_dandiset, starting_assets, work_on, {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_nonexistent_src(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(NotFoundError) as excinfo:
move(
"file.txt",
"subdir1/avocado.txt",
dest="subdir2/",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
)
assert (
str(excinfo.value)
== f"No asset at {'remote' if work_on == 'remote' else 'local'} path 'subdir1/avocado.txt'"
)
check_assets(moving_dandiset, starting_assets, work_on, {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_file_slash_src(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
"file.txt",
"subdir1/apple.txt/",
dest="subdir2/",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
)
assert (
str(excinfo.value)
== f"{'Remote' if work_on == 'remote' else 'Local'} path 'subdir1/apple.txt/' is a file"
)
check_assets(moving_dandiset, starting_assets, work_on, {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_file_slash_dest(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
"file.txt",
dest="subdir1/apple.txt/",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
)
assert (
str(excinfo.value)
== f"{'Remote' if work_on == 'remote' else 'Local'} path 'subdir1/apple.txt/' is a file"
)
check_assets(moving_dandiset, starting_assets, work_on, {})
def test_move_regex_no_match(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
"no-match",
dest="nowhere",
regex=True,
work_on="both",
dandi_instance=moving_dandiset.api.instance_id,
)
assert (
str(excinfo.value)
== "Regular expression 'no-match' did not match any local paths"
)
check_assets(moving_dandiset, starting_assets, "both", {})
def test_move_regex_collision(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
r"^\w+/foo\.json$",
dest="data/data.json",
regex=True,
work_on="both",
dandi_instance=moving_dandiset.api.instance_id,
)
assert (
str(excinfo.value)
== "Local assets 'subdir4/foo.json' and 'subdir5/foo.json' would both"
" be moved to 'data/data.json'"
)
check_assets(moving_dandiset, starting_assets, "both", {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_regex_some_to_self(
caplog: pytest.LogCaptureFixture,
monkeypatch: pytest.MonkeyPatch,
moving_dandiset: SampleDandiset,
work_on: str,
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
r"(.+[123])/([^.]+)\.(.+)",
dest=r"\1/\2.dat",
regex=True,
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
for path in ["subdir3/red.dat", "subdir3/green.dat", "subdir3/blue.dat"]:
for where in ["local", "remote"] if work_on == "both" else [work_on]:
assert (
"dandi",
logging.DEBUG,
f"Would move {where} asset {path!r} to itself; ignoring",
) in caplog.record_tuples
check_assets(
moving_dandiset,
starting_assets,
work_on,
{
"subdir1/apple.txt": "subdir1/apple.dat",
"subdir2/banana.txt": "subdir2/banana.dat",
"subdir2/coconut.txt": "subdir2/coconut.dat",
},
)
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_from_subdir(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath / "subdir1")
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"../file.txt",
"apple.txt",
dest="../subdir2",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
check_assets(
moving_dandiset,
starting_assets,
work_on,
{
"file.txt": "subdir2/file.txt",
"subdir1/apple.txt": "subdir2/apple.txt",
},
)
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_in_subdir(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath / "subdir1")
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"apple.txt",
dest="macintosh.txt",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
check_assets(
moving_dandiset,
starting_assets,
work_on,
{"subdir1/apple.txt": "subdir1/macintosh.txt"},
)
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_from_subdir_abspaths(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath / "subdir1")
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(NotFoundError) as excinfo:
move(
"file.txt",
"subdir1/apple.txt",
dest="subdir2",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
)
assert (
str(excinfo.value)
== f"No asset at {'remote' if work_on == 'remote' else 'local'} path 'file.txt'"
)
check_assets(moving_dandiset, starting_assets, work_on, {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_from_subdir_as_dot(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath / "subdir1")
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(ValueError) as excinfo:
move(
".",
dest="../subdir2",
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
assert str(excinfo.value) == "Cannot move current working directory"
check_assets(moving_dandiset, starting_assets, work_on, {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_from_subdir_regex(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath / "subdir1")
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
r"\.txt",
dest=".dat",
regex=True,
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
check_assets(
moving_dandiset,
starting_assets,
work_on,
{"subdir1/apple.txt": "subdir1/apple.dat"},
)
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_from_subdir_regex_no_changes(
caplog: pytest.LogCaptureFixture,
monkeypatch: pytest.MonkeyPatch,
moving_dandiset: SampleDandiset,
work_on: str,
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath / "subdir1")
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
r"\.txt",
dest=".txt",
regex=True,
work_on=work_on,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
assert ("dandi", logging.INFO, "Nothing to move") in caplog.record_tuples
check_assets(moving_dandiset, starting_assets, work_on, {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_dandiset_path(
monkeypatch: pytest.MonkeyPatch,
moving_dandiset: SampleDandiset,
tmp_path: Path,
work_on: str,
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(tmp_path)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"file.txt",
"subdir2/banana.txt",
dest="subdir1",
work_on=work_on,
dandiset=moving_dandiset.dspath,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
check_assets(
moving_dandiset,
starting_assets,
work_on,
{
"file.txt": "subdir1/file.txt",
"subdir2/banana.txt": "subdir1/banana.txt",
},
)
@pytest.mark.parametrize("work_on", ["auto", "remote"])
def test_move_dandiset_url(
monkeypatch: pytest.MonkeyPatch,
moving_dandiset: SampleDandiset,
tmp_path: Path,
work_on: str,
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(tmp_path)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"file.txt",
"subdir2/banana.txt",
dest="subdir1",
work_on=work_on,
dandiset=moving_dandiset.dandiset.api_url,
devel_debug=True,
)
check_assets(
moving_dandiset,
starting_assets,
"remote",
{
"file.txt": "subdir1/file.txt",
"subdir2/banana.txt": "subdir1/banana.txt",
},
)
def test_move_work_on_auto(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, tmp_path: Path
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"file.txt",
"subdir2/banana.txt",
dest="subdir1",
work_on="auto",
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
check_assets(
moving_dandiset,
starting_assets,
"both",
{
"file.txt": "subdir1/file.txt",
"subdir2/banana.txt": "subdir1/banana.txt",
},
)
@pytest.mark.parametrize("work_on", ["auto", "both", "local", "remote"])
def test_move_not_dandiset(
monkeypatch: pytest.MonkeyPatch, tmp_path: Path, work_on: str
) -> None:
monkeypatch.chdir(tmp_path)
with pytest.raises(ValueError) as excinfo:
move("file.txt", "subdir2/banana.txt", dest="subdir1", work_on=work_on)
assert str(excinfo.value) == f"{tmp_path.absolute()}: not a Dandiset"
def test_move_local_delete_empty_dirs(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
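    # Local moves should prune directories left empty (subdir1) while keeping the others.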
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath / "subdir4")
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"../subdir1/apple.txt",
"../subdir2/banana.txt",
"foo.json",
dest="../subdir3",
work_on="local",
devel_debug=True,
)
check_assets(
moving_dandiset,
starting_assets,
"local",
{
"subdir1/apple.txt": "subdir3/apple.txt",
"subdir2/banana.txt": "subdir3/banana.txt",
"subdir4/foo.json": "subdir3/foo.json",
},
)
assert not (moving_dandiset.dspath / "subdir1").exists()
assert (moving_dandiset.dspath / "subdir2").exists()
assert (moving_dandiset.dspath / "subdir4").exists()
def test_move_both_src_path_not_in_local(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
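    # Delete a local copy so the local and remote trees disagree before moving.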
(moving_dandiset.dspath / "subdir2" / "banana.txt").unlink()
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(AssetMismatchError) as excinfo:
move(
"subdir2",
dest="subdir3",
work_on="both",
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
assert (
str(excinfo.value) == "Mismatch between local and remote Dandisets:\n"
"- Asset 'subdir2/banana.txt' only exists remotely\n"
"- Asset 'subdir2/coconut.txt' only exists remotely"
)
check_assets(moving_dandiset, starting_assets, "both", {"subdir2/banana.txt": None})
def test_move_both_src_path_not_in_remote(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
(moving_dandiset.dspath / "subdir2" / "mango.txt").write_text("Mango\n")
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(AssetMismatchError) as excinfo:
move(
"subdir2",
dest="subdir3",
work_on="both",
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
assert (
str(excinfo.value) == "Mismatch between local and remote Dandisets:\n"
"- Asset 'subdir2/mango.txt' only exists locally"
)
check_assets(moving_dandiset, starting_assets, "both", {})
@pytest.mark.parametrize("existing", ["skip", "overwrite"])
def test_move_both_dest_path_not_in_remote(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, existing: str
) -> None:
(moving_dandiset.dspath / "subdir2" / "file.txt").write_text("This is a file.\n")
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(AssetMismatchError) as excinfo:
move(
"file.txt",
dest="subdir2",
work_on="both",
existing=existing,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
assert (
str(excinfo.value) == "Mismatch between local and remote Dandisets:\n"
"- Asset 'file.txt' would be moved to 'subdir2/file.txt', which exists"
" locally but not remotely"
)
check_assets(moving_dandiset, starting_assets, "both", {})
@pytest.mark.parametrize("existing", ["skip", "overwrite"])
def test_move_both_dest_path_not_in_local(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, existing: str
) -> None:
(moving_dandiset.dspath / "subdir2" / "banana.txt").unlink()
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(AssetMismatchError) as excinfo:
move(
"file.txt",
dest="subdir2/banana.txt",
work_on="both",
existing=existing,
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
assert (
str(excinfo.value)
== "Mismatch between local and remote Dandisets:\n- Asset 'file.txt'"
" would be moved to 'subdir2/banana.txt', which exists remotely but"
" not locally"
)
check_assets(moving_dandiset, starting_assets, "both", {"subdir2/banana.txt": None})
def test_move_both_dest_mismatch(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
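    # Turn the local apple.txt into a directory so the local and remote moves resolve to different destinations.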
(moving_dandiset.dspath / "subdir1" / "apple.txt").unlink()
(moving_dandiset.dspath / "subdir1" / "apple.txt").mkdir()
(moving_dandiset.dspath / "subdir1" / "apple.txt" / "seeds").write_text("12345\n")
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
with pytest.raises(AssetMismatchError) as excinfo:
move(
"file.txt",
dest="subdir1/apple.txt",
work_on="both",
existing="overwrite",
dandi_instance=moving_dandiset.api.instance_id,
devel_debug=True,
)
assert (
str(excinfo.value) == "Mismatch between local and remote Dandisets:\n"
"- Asset 'file.txt' would be moved to 'subdir1/apple.txt/file.txt'"
" locally but to 'subdir1/apple.txt' remotely"
)
check_assets(moving_dandiset, starting_assets, "both", {"subdir1/apple.txt": None})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_pyout(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"file.txt",
"subdir4/foo.json",
dest="subdir5",
work_on=work_on,
existing="overwrite",
devel_debug=False,
dandi_instance=moving_dandiset.api.instance_id,
)
check_assets(
moving_dandiset,
starting_assets,
work_on,
{
"file.txt": "subdir5/file.txt",
"subdir4/foo.json": "subdir5/foo.json",
"subdir5/foo.json": None,
},
)
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_pyout_dry_run(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset, work_on: str
) -> None:
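    # With dry_run=True no assets should actually be moved.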
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"file.txt",
"subdir4/foo.json",
dest="subdir5",
work_on=work_on,
existing="overwrite",
devel_debug=False,
dry_run=True,
dandi_instance=moving_dandiset.api.instance_id,
)
check_assets(moving_dandiset, starting_assets, work_on, {})
@pytest.mark.parametrize("work_on", ["local", "remote", "both"])
def test_move_path_to_self(
caplog: pytest.LogCaptureFixture,
monkeypatch: pytest.MonkeyPatch,
moving_dandiset: SampleDandiset,
work_on: str,
) -> None:
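    # Moving an asset onto its own path is logged and skipped rather than treated as an error.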
(moving_dandiset.dspath / "newdir").mkdir()
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath / "subdir1")
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"apple.txt",
dest="../subdir1",
work_on=work_on,
devel_debug=True,
dandi_instance=moving_dandiset.api.instance_id,
)
for where in ["local", "remote"] if work_on == "both" else [work_on]:
assert (
"dandi",
logging.DEBUG,
f"Would move {where} asset 'subdir1/apple.txt' to itself; ignoring",
) in caplog.record_tuples
assert ("dandi", logging.INFO, "Nothing to move") in caplog.record_tuples
check_assets(moving_dandiset, starting_assets, work_on, {})
def test_move_remote_dest_is_local_dir_sans_slash(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
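    # Remote-only: "newdir" has no trailing slash, so it is treated as a file path even though a local directory of that name exists.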
(moving_dandiset.dspath / "newdir").mkdir()
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"file.txt",
dest="newdir",
work_on="remote",
devel_debug=True,
dandi_instance=moving_dandiset.api.instance_id,
)
check_assets(moving_dandiset, starting_assets, "remote", {"file.txt": "newdir"})
def test_move_both_dest_is_local_dir_sans_slash(
monkeypatch: pytest.MonkeyPatch, moving_dandiset: SampleDandiset
) -> None:
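    # In "both" mode the existing local directory makes "newdir" act as a folder, so the file lands inside it.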
(moving_dandiset.dspath / "newdir").mkdir()
starting_assets = list(moving_dandiset.dandiset.get_assets())
monkeypatch.chdir(moving_dandiset.dspath)
monkeypatch.setenv("DANDI_API_KEY", moving_dandiset.api.api_key)
move(
"file.txt",
dest="newdir",
work_on="both",
devel_debug=True,
dandi_instance=moving_dandiset.api.instance_id,
)
check_assets(
moving_dandiset, starting_assets, "both", {"file.txt": "newdir/file.txt"}
)
| [
"[email protected]"
] | |
5d0e57eddce91b4d61e123d45d851339227494c9 | 51f2492a5c207e3664de8f6b2d54bb93e313ca63 | /atcoder/arc095/d.py | c2157f467e42ce2ef4a64ede2861cce4c65cbefc | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abeaumont/competitive-programming | 23c5aabd587d7bb15a61efd3428838cb934233dd | a24c9b89941a59d344b51dc1010de66522b1a0dd | refs/heads/master | 2023-09-01T09:50:58.267361 | 2023-07-31T18:00:10 | 2023-07-31T18:00:10 | 117,589,708 | 618 | 262 | WTFPL | 2023-07-12T17:36:20 | 2018-01-15T20:00:56 | C++ | UTF-8 | Python | false | false | 291 | py | #!/usr/bin/env python3
# https://arc095.contest.atcoder.jp/tasks/arc095_b
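# Take the largest value y as n, then pick x among the rest minimizing
# max(x, y - x), i.e. the value closest to y / 2 (which maximizes C(y, x)).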
n = int(input())
a = [int(x) for x in input().split()]
a.sort()
y = a[-1]
m = None
x = None
for i in range(n - 1):
k = a[i]
l = max(k, y - k)
if m is None or m > l:
m = l
x = k
print(y, x)
| [
"[email protected]"
] | |
10f9b023d025a4040f73e9e8d651b69aa9b0dd26 | f5c9e20987413a46c7f2e856404813d27cde26e7 | /mysite/pages/views.py | 69e176db25833b22ebab966802d685dbeca9178a | [] | no_license | PriyankaBuchkul/practice | dc1573209edf7cc5d8abecfce485d15de7f029fd | 16d864b1a6082814978f68ee0daf8fa404a89f2a | refs/heads/master | 2020-03-13T05:17:41.406838 | 2018-04-25T09:55:11 | 2018-04-25T09:55:11 | 130,980,621 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | from django.shortcuts import render
from django.http import HttpResponse
from .models import Posts
# Create your views here.
def index(request):
    # return HttpResponse('Hello From Post')
    posts = Posts.objects.all()[:10]
    context = {
        'title': 'Latest Posts',
        'posts': posts,
    }
    return render(request, 'pages/index.html', context)
def details(request, id):
    post = Posts.objects.get(id=id)
    context = {
        'post': post,
    }
    return render(request, 'pages/details.html', context)
| [
"[email protected]"
] | |
eebe441b71964e68194495cd7d2c119654278901 | 5955ea34fd72c719f3cb78fbb3c7e802a2d9109a | /_STRUCTURES/LIST/Create/create_list_1.py | 8d96f1119bfb4c6ad8eb7b8b7f41d1c5a7fc986f | [] | no_license | AndreySperansky/TUITION | 3c90ac45f11c70dce04008adc1e9f9faad840b90 | 583d3a760d1f622689f6f4f482c905b065d6c732 | refs/heads/master | 2022-12-21T21:48:21.936988 | 2020-09-28T23:18:40 | 2020-09-28T23:18:40 | 299,452,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | A = ['red', 'green', 'blue']
print(' '.join(A))
# red green blue
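# map(str, ...) converts each element first, so join also works for non-string items: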
c = ['alfa', 'betta', 'gamma']
print(' '.join(map(str, c)))
# alfa betta gamma
d = [1, 2, 3]
print(' '.join(map(str, d)))
# 1 2 3 | [
"[email protected]"
] | |
afde19ef47f9828ec2b07a66971ff95d77feb5d4 | 9e45801526b372ea364e1aaf8df4f8ce6be4d754 | /tnd_server/handlers/group.py | f4cf698a3a46187b8153799f34b7ccab92169693 | [] | no_license | niyoufa/ods | c09c5bd4e429cd6d4043d76ce1d89f413946a9d1 | 468ffe3fa34e17fecd0c872bdbaa9701b81b50d5 | refs/heads/master | 2021-01-19T21:23:43.812758 | 2016-07-08T09:49:04 | 2016-07-08T09:49:04 | 59,827,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,919 | py | #coding=utf-8
"""
author : niyoufa
date : 2016-06-30
"""
import sys, pdb, json, datetime, pymongo, urllib
import tornado.web
import ods.tnd_server.status as status
import ods.utils as utils
import ods.tnd_server.settings as settings
import ods.tnd_server.handler as handler
import ods.clients.curl as curl
import ods.clients.rong as rong
import ods.dhui.dhui_task as dt
class DhuiGroupJoinHandler(handler.APIHandler):
#加入群组
def post(self):
result = utils.init_response_data()
try:
user_id=self.get_argument('user_id')
group_id=self.get_argument('group_id')
group_name=self.get_argument('group_name')
except Exception, e:
result = utils.reset_response_data(status.Status.PARMAS_ERROR,str(e))
self.finish(result)
return
#
self.finish(result)
class DhuiGroupQuitHandler(handler.APIHandler):
    # Leave a group
    def post(self):
        result = utils.init_response_data()
        try:
            user_id = self.get_argument('user_id')
            group_id = self.get_argument('group_id')
        except Exception as e:
            result = utils.reset_response_data(status.Status.PARMAS_ERROR, str(e))
            self.finish(result)
            return
        self.finish(result)
class DhuiGroupUserQueryHandler(handler.APIHandler):
    # Query the members of a group
    def get(self):
        result = utils.init_response_data()
        try:
            group_id = self.get_argument('group_id')
        except Exception as e:
            result = utils.reset_response_data(status.Status.PARMAS_ERROR, str(e))
            self.finish(result)
            return
        self.finish(result)
class DhuiGroupUserDetailQueryHandler(handler.APIHandler):
    # Query the user-table records of every member in a group
    def get(self):
        result = utils.init_response_data()
        try:
            group_id = self.get_argument('group_id', '')
        except Exception as e:
            result = utils.reset_response_data(status.Status.PARMAS_ERROR, str(e))
            self.finish(result)
            return
        self.finish(result)
class DhuiUserGroupQueryHandler(handler.APIHandler):
    # Get the group_id and group_name of every group the user belongs to
    def get(self):
        result = utils.init_response_data()
        try:
            user_id = self.get_argument('user_id', '')
        except Exception as e:
            result = utils.reset_response_data(status.Status.PARMAS_ERROR, str(e))
            self.finish(result)
            return
        self.finish(result)
handlers = [
(r"/odoo/api/group/join",DhuiGroupJoinHandler),
(r"/odoo/api/group/quit",DhuiGroupQuitHandler),
(r"/odoo/api/group/user/query",DhuiGroupUserQueryHandler),
(r"/odoo/api/group/user/detail/query",DhuiGroupUserDetailQueryHandler),
(r"/odoo/api/user/group",DhuiUserGroupQueryHandler),
]
| [
"[email protected]"
] |