| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M |
# coding=utf-8
# Copyright 2017 The DLT2T Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DLT2T.registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from DLT2T.utils import modality
from DLT2T.utils import registry
from DLT2T.utils import t2t_model
import tensorflow as tf
# pylint: disable=unused-variable
class ModelRegistryTest(tf.test.TestCase):
def setUp(self):
registry._reset()
def testT2TModelRegistration(self):
@registry.register_model
class MyModel1(t2t_model.T2TModel):
pass
model = registry.model("my_model1")
self.assertTrue(model is MyModel1)
def testNamedRegistration(self):
@registry.register_model("model2")
class MyModel1(t2t_model.T2TModel):
pass
model = registry.model("model2")
self.assertTrue(model is MyModel1)
def testNonT2TModelRegistration(self):
@registry.register_model
def model_fn():
pass
model = registry.model("model_fn")
self.assertTrue(model is model_fn)
def testUnknownModel(self):
with self.assertRaisesRegexp(LookupError, "never registered"):
registry.model("not_registered")
def testDuplicateRegistration(self):
@registry.register_model
def m1():
pass
with self.assertRaisesRegexp(LookupError, "already registered"):
@registry.register_model("m1")
def m2():
pass
def testListModels(self):
@registry.register_model
def m1():
pass
@registry.register_model
def m2():
pass
self.assertSetEqual(set(["m1", "m2"]), set(registry.list_models()))
def testSnakeCase(self):
convert = registry._convert_camel_to_snake
self.assertEqual("typical_camel_case", convert("TypicalCamelCase"))
self.assertEqual("numbers_fuse2gether", convert("NumbersFuse2gether"))
self.assertEqual("numbers_fuse2_gether", convert("NumbersFuse2Gether"))
self.assertEqual("lstm_seq2_seq", convert("LSTMSeq2Seq"))
self.assertEqual("starts_lower", convert("startsLower"))
self.assertEqual("starts_lower_caps", convert("startsLowerCAPS"))
self.assertEqual("caps_fuse_together", convert("CapsFUSETogether"))
self.assertEqual("startscap", convert("Startscap"))
self.assertEqual("s_tartscap", convert("STartscap"))
class HParamRegistryTest(tf.test.TestCase):
def setUp(self):
registry._reset()
def testHParamSet(self):
@registry.register_hparams
def my_hparams_set():
pass
@registry.register_ranged_hparams
def my_hparams_range(_):
pass
self.assertTrue(registry.hparams("my_hparams_set") is my_hparams_set)
self.assertTrue(
registry.ranged_hparams("my_hparams_range") is my_hparams_range)
def testNamedRegistration(self):
@registry.register_hparams("a")
def my_hparams_set():
pass
@registry.register_ranged_hparams("a")
def my_hparams_range(_):
pass
self.assertTrue(registry.hparams("a") is my_hparams_set)
self.assertTrue(registry.ranged_hparams("a") is my_hparams_range)
def testUnknownHparams(self):
with self.assertRaisesRegexp(LookupError, "never registered"):
registry.hparams("not_registered")
with self.assertRaisesRegexp(LookupError, "never registered"):
registry.ranged_hparams("not_registered")
def testDuplicateRegistration(self):
@registry.register_hparams
def hp1():
pass
with self.assertRaisesRegexp(LookupError, "already registered"):
@registry.register_hparams("hp1")
def hp2():
pass
@registry.register_ranged_hparams
def rhp1(_):
pass
with self.assertRaisesRegexp(LookupError, "already registered"):
@registry.register_ranged_hparams("rhp1")
def rhp2(_):
pass
def testListHparams(self):
@registry.register_hparams
def hp1():
pass
@registry.register_hparams("hp2_named")
def hp2():
pass
@registry.register_ranged_hparams
def rhp1(_):
pass
@registry.register_ranged_hparams("rhp2_named")
def rhp2(_):
pass
self.assertSetEqual(set(["hp1", "hp2_named"]), set(registry.list_hparams()))
self.assertSetEqual(
set(["rhp1", "rhp2_named"]), set(registry.list_ranged_hparams()))
def testRangeSignatureCheck(self):
with self.assertRaisesRegexp(ValueError, "must take a single argument"):
@registry.register_ranged_hparams
def rhp_bad():
pass
with self.assertRaisesRegexp(ValueError, "must take a single argument"):
@registry.register_ranged_hparams
def rhp_bad2(a, b): # pylint: disable=unused-argument
pass
class ModalityRegistryTest(tf.test.TestCase):
def setUp(self):
registry._reset()
def testModalityRegistration(self):
@registry.register_symbol_modality
class MySymbolModality(modality.Modality):
pass
@registry.register_audio_modality
class MyAudioModality(modality.Modality):
pass
@registry.register_image_modality
class MyImageModality(modality.Modality):
pass
@registry.register_class_label_modality
class MyClassLabelModality(modality.Modality):
pass
self.assertTrue(
registry.symbol_modality("my_symbol_modality") is MySymbolModality)
self.assertTrue(
registry.audio_modality("my_audio_modality") is MyAudioModality)
self.assertTrue(
registry.image_modality("my_image_modality") is MyImageModality)
self.assertTrue(
registry.class_label_modality("my_class_label_modality") is
MyClassLabelModality)
def testDefaultNameLookup(self):
@registry.register_symbol_modality("default")
class MyDefaultModality(modality.Modality):
pass
self.assertTrue(registry.symbol_modality() is MyDefaultModality)
def testList(self):
@registry.register_symbol_modality
class MySymbolModality(modality.Modality):
pass
@registry.register_audio_modality
class MyAudioModality(modality.Modality):
pass
@registry.register_image_modality
class MyImageModality(modality.Modality):
pass
@registry.register_class_label_modality
class MyClassLabelModality(modality.Modality):
pass
expected = [
"symbol:my_symbol_modality", "audio:my_audio_modality",
"image:my_image_modality", "class_label:my_class_label_modality"
]
self.assertSetEqual(set(registry.list_modalities()), set(expected))
if __name__ == "__main__":
tf.test.main()
| renqianluo/DLT2T | DLT2T/utils/registry_test.py | Python | apache-2.0 | 7,045 |
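The tests above pin down the model-registry contract without showing the implementation. The sketch below is an editor's reconstruction consistent with those assertions, not the actual DLT2T source; the _MODELS dict and the error messages are assumptions, and the hparams/modality registries exercised further down would follow the same pattern.

# Hypothetical minimal registry inferred from the tests above
# (an editor's sketch, not the real DLT2T implementation).
import re

_MODELS = {}  # assumed internal store; the name is an invention for this sketch

_first_cap_re = re.compile(r"(.)([A-Z][a-z0-9]+)")
_all_cap_re = re.compile(r"([a-z0-9])([A-Z])")


def _convert_camel_to_snake(name):
    # Two-pass CamelCase -> snake_case; reproduces every case in testSnakeCase.
    s1 = _first_cap_re.sub(r"\1_\2", name)
    return _all_cap_re.sub(r"\1_\2", s1).lower()


def _reset():
    _MODELS.clear()


def register_model(name=None):
    # Works both bare (@register_model) and named (@register_model("m")).
    def _register(model, registration_name=None):
        model_name = registration_name or _convert_camel_to_snake(model.__name__)
        if model_name in _MODELS:
            raise LookupError("Model %s already registered." % model_name)
        _MODELS[model_name] = model
        return model

    if callable(name):  # used as a bare decorator on a class or function
        return _register(name)
    return lambda model: _register(model, name)


def model(name):
    if name not in _MODELS:
        raise LookupError("Model %s never registered." % name)
    return _MODELS[name]


def list_models():
    return list(_MODELS)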
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.hardware.core.core_device import CoreDevice
class TempHumMicroServer(CoreDevice):
"""
http://www.omega.com/Manuals/manualpdf/M3861.pdf
iServer MicroServer
tested with iTHX-W
"""
scan_func = 'read_temperature'
def read_temperature(self, **kw):
v = self.ask('*SRTF', timeout=1.0, **kw)
return self._parse_response(v)
def read_humidity(self, **kw):
v = self.ask('*SRH', timeout=1.0, **kw)
return self._parse_response(v)
def _parse_response(self, v):
try:
return float(v)
except (AttributeError, ValueError, TypeError):
return self.get_random_value()
# ============= EOF =============================================
| UManPychron/pychron | pychron/hardware/environmental_probe.py | Python | apache-2.0 | 1,717 |
# Copyright 2015 Rackspace, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from onmetal_scripts.lib import states
from onmetal_scripts import reboot_unprovisioned
from onmetal_scripts.tests import base
import mock
class TestRebootUnprovisioned(base.BaseTest):
def setUp(self):
self.script = reboot_unprovisioned.RebootUnprovisioned()
self.script.get_argument = mock.Mock()
self.script.get_argument.return_value = 0
@mock.patch('onmetal_scripts.reboot_unprovisioned.RebootUnprovisioned.'
'ironic_client')
def test_run(self, ironic_mock):
active_node = self._get_test_node(
provision_state=states.ACTIVE,
instance_uuid='118ad976-084a-443f-9ec5-77d477f2bfcc')
inactive_node = self._get_test_node(
provision_state=states.AVAILABLE,
instance_uuid=None,
maintenance=False)
ironic_mock.list_nodes.return_value = [active_node, inactive_node]
self.script.run()
ironic_mock.set_target_power_state.assert_called_once_with(
inactive_node, states.REBOOT)
@mock.patch('onmetal_scripts.reboot_unprovisioned.RebootUnprovisioned.'
'ironic_client')
def test_run_fail(self, ironic_mock):
inactive_node = self._get_test_node(
provision_state=states.AVAILABLE,
instance_uuid=None,
maintenance=False)
ironic_mock.list_nodes.return_value = [inactive_node]
ironic_mock.set_target_power_state.side_effect = ValueError
self.script.run()
ironic_mock.set_target_power_state.assert_called_once_with(
inactive_node, states.REBOOT)
| rackerlabs/onmetal-scripts | onmetal_scripts/tests/test_reboot_unprovisioned.py | Python | apache-2.0 | 2,243 |
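For orientation, the script under test can be reconstructed from the mocks and assertions above: reboot every node that is AVAILABLE, unassigned, and not in maintenance, and keep going when a reboot fails. This is an editor's inference, not the actual onmetal_scripts code.

# Editor's reconstruction of the script under test; the real
# onmetal_scripts implementation may differ.
from onmetal_scripts.lib import states


class RebootUnprovisioned(object):
    # self.ironic_client is assumed to be supplied by the base script class
    # (the tests above patch it in).
    def run(self):
        for node in self.ironic_client.list_nodes():
            if node.provision_state != states.AVAILABLE:
                continue  # skip ACTIVE / deploying nodes
            if node.instance_uuid or node.maintenance:
                continue  # skip assigned or quarantined nodes
            try:
                self.ironic_client.set_target_power_state(node, states.REBOOT)
            except Exception:
                # test_run_fail expects a failed reboot not to abort the run
                pass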
"""Voluptuous schemas for the KNX integration."""
import voluptuous as vol
from xknx.devices.climate import SetpointShiftMode
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_TYPE,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_STATE_ADDRESS,
CONF_SYNC_STATE,
OPERATION_MODES,
PRESET_MODES,
ColorTempModes,
)
class ConnectionSchema:
"""Voluptuous schema for KNX connection."""
CONF_KNX_LOCAL_IP = "local_ip"
TUNNELING_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_KNX_LOCAL_IP): cv.string,
vol.Optional(CONF_PORT): cv.port,
}
)
ROUTING_SCHEMA = vol.Schema({vol.Optional(CONF_KNX_LOCAL_IP): cv.string})
class CoverSchema:
"""Voluptuous schema for KNX covers."""
CONF_MOVE_LONG_ADDRESS = "move_long_address"
CONF_MOVE_SHORT_ADDRESS = "move_short_address"
CONF_STOP_ADDRESS = "stop_address"
CONF_POSITION_ADDRESS = "position_address"
CONF_POSITION_STATE_ADDRESS = "position_state_address"
CONF_ANGLE_ADDRESS = "angle_address"
CONF_ANGLE_STATE_ADDRESS = "angle_state_address"
CONF_TRAVELLING_TIME_DOWN = "travelling_time_down"
CONF_TRAVELLING_TIME_UP = "travelling_time_up"
CONF_INVERT_POSITION = "invert_position"
CONF_INVERT_ANGLE = "invert_angle"
DEFAULT_TRAVEL_TIME = 25
DEFAULT_NAME = "KNX Cover"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MOVE_LONG_ADDRESS): cv.string,
vol.Optional(CONF_MOVE_SHORT_ADDRESS): cv.string,
vol.Optional(CONF_STOP_ADDRESS): cv.string,
vol.Optional(CONF_POSITION_ADDRESS): cv.string,
vol.Optional(CONF_POSITION_STATE_ADDRESS): cv.string,
vol.Optional(CONF_ANGLE_ADDRESS): cv.string,
vol.Optional(CONF_ANGLE_STATE_ADDRESS): cv.string,
vol.Optional(
CONF_TRAVELLING_TIME_DOWN, default=DEFAULT_TRAVEL_TIME
): cv.positive_int,
vol.Optional(
CONF_TRAVELLING_TIME_UP, default=DEFAULT_TRAVEL_TIME
): cv.positive_int,
vol.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
vol.Optional(CONF_INVERT_ANGLE, default=False): cv.boolean,
}
)
class BinarySensorSchema:
"""Voluptuous schema for KNX binary sensors."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_SYNC_STATE = CONF_SYNC_STATE
CONF_IGNORE_INTERNAL_STATE = "ignore_internal_state"
CONF_AUTOMATION = "automation"
CONF_HOOK = "hook"
CONF_DEFAULT_HOOK = "on"
CONF_COUNTER = "counter"
CONF_DEFAULT_COUNTER = 1
CONF_ACTION = "action"
CONF_RESET_AFTER = "reset_after"
DEFAULT_NAME = "KNX Binary Sensor"
AUTOMATION_SCHEMA = vol.Schema(
{
vol.Optional(CONF_HOOK, default=CONF_DEFAULT_HOOK): cv.string,
vol.Optional(CONF_COUNTER, default=CONF_DEFAULT_COUNTER): cv.port,
vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
}
)
AUTOMATIONS_SCHEMA = vol.All(cv.ensure_list, [AUTOMATION_SCHEMA])
SCHEMA = vol.All(
cv.deprecated("significant_bit"),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): vol.Any(
vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
cv.boolean,
cv.string,
),
vol.Optional(CONF_IGNORE_INTERNAL_STATE, default=False): cv.boolean,
vol.Required(CONF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_DEVICE_CLASS): cv.string,
vol.Optional(CONF_RESET_AFTER): cv.positive_int,
vol.Optional(CONF_AUTOMATION): AUTOMATIONS_SCHEMA,
}
),
)
class LightSchema:
"""Voluptuous schema for KNX lights."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_BRIGHTNESS_ADDRESS = "brightness_address"
CONF_BRIGHTNESS_STATE_ADDRESS = "brightness_state_address"
CONF_COLOR_ADDRESS = "color_address"
CONF_COLOR_STATE_ADDRESS = "color_state_address"
CONF_COLOR_TEMP_ADDRESS = "color_temperature_address"
CONF_COLOR_TEMP_STATE_ADDRESS = "color_temperature_state_address"
CONF_COLOR_TEMP_MODE = "color_temperature_mode"
CONF_RGBW_ADDRESS = "rgbw_address"
CONF_RGBW_STATE_ADDRESS = "rgbw_state_address"
CONF_MIN_KELVIN = "min_kelvin"
CONF_MAX_KELVIN = "max_kelvin"
DEFAULT_NAME = "KNX Light"
DEFAULT_COLOR_TEMP_MODE = "absolute"
DEFAULT_MIN_KELVIN = 2700 # 370 mireds
DEFAULT_MAX_KELVIN = 6000 # 166 mireds
SCHEMA = vol.Schema(
{
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_BRIGHTNESS_ADDRESS): cv.string,
vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_STATE_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_TEMP_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_TEMP_STATE_ADDRESS): cv.string,
vol.Optional(
CONF_COLOR_TEMP_MODE, default=DEFAULT_COLOR_TEMP_MODE
): cv.enum(ColorTempModes),
vol.Optional(CONF_RGBW_ADDRESS): cv.string,
vol.Optional(CONF_RGBW_STATE_ADDRESS): cv.string,
vol.Optional(CONF_MIN_KELVIN, default=DEFAULT_MIN_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_MAX_KELVIN, default=DEFAULT_MAX_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
)
class ClimateSchema:
"""Voluptuous schema for KNX climate devices."""
CONF_SETPOINT_SHIFT_ADDRESS = "setpoint_shift_address"
CONF_SETPOINT_SHIFT_STATE_ADDRESS = "setpoint_shift_state_address"
CONF_SETPOINT_SHIFT_MODE = "setpoint_shift_mode"
CONF_SETPOINT_SHIFT_MAX = "setpoint_shift_max"
CONF_SETPOINT_SHIFT_MIN = "setpoint_shift_min"
CONF_TEMPERATURE_ADDRESS = "temperature_address"
CONF_TEMPERATURE_STEP = "temperature_step"
CONF_TARGET_TEMPERATURE_ADDRESS = "target_temperature_address"
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = "target_temperature_state_address"
CONF_OPERATION_MODE_ADDRESS = "operation_mode_address"
CONF_OPERATION_MODE_STATE_ADDRESS = "operation_mode_state_address"
CONF_CONTROLLER_STATUS_ADDRESS = "controller_status_address"
CONF_CONTROLLER_STATUS_STATE_ADDRESS = "controller_status_state_address"
CONF_CONTROLLER_MODE_ADDRESS = "controller_mode_address"
CONF_CONTROLLER_MODE_STATE_ADDRESS = "controller_mode_state_address"
CONF_HEAT_COOL_ADDRESS = "heat_cool_address"
CONF_HEAT_COOL_STATE_ADDRESS = "heat_cool_state_address"
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = (
"operation_mode_frost_protection_address"
)
CONF_OPERATION_MODE_NIGHT_ADDRESS = "operation_mode_night_address"
CONF_OPERATION_MODE_COMFORT_ADDRESS = "operation_mode_comfort_address"
CONF_OPERATION_MODE_STANDBY_ADDRESS = "operation_mode_standby_address"
CONF_OPERATION_MODES = "operation_modes"
CONF_ON_OFF_ADDRESS = "on_off_address"
CONF_ON_OFF_STATE_ADDRESS = "on_off_state_address"
CONF_ON_OFF_INVERT = "on_off_invert"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
DEFAULT_NAME = "KNX Climate"
DEFAULT_SETPOINT_SHIFT_MODE = "DPT6010"
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEFAULT_TEMPERATURE_STEP = 0.1
DEFAULT_ON_OFF_INVERT = False
SCHEMA = vol.All(
cv.deprecated("setpoint_shift_step", replacement_key=CONF_TEMPERATURE_STEP),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_SETPOINT_SHIFT_MODE, default=DEFAULT_SETPOINT_SHIFT_MODE
): cv.enum(SetpointShiftMode),
vol.Optional(
CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX
): vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(
CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN
): vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(
CONF_TEMPERATURE_STEP, default=DEFAULT_TEMPERATURE_STEP
): vol.All(float, vol.Range(min=0, max=2)),
vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_HEAT_COOL_ADDRESS): cv.string,
vol.Optional(CONF_HEAT_COOL_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STANDBY_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
vol.Optional(
CONF_ON_OFF_INVERT, default=DEFAULT_ON_OFF_INVERT
): cv.boolean,
vol.Optional(CONF_OPERATION_MODES): vol.All(
cv.ensure_list, [vol.In({**OPERATION_MODES, **PRESET_MODES})]
),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
}
),
)
class SwitchSchema:
"""Voluptuous schema for KNX switches."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
DEFAULT_NAME = "KNX Switch"
SCHEMA = vol.Schema(
{
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STATE_ADDRESS): cv.string,
}
)
class ExposeSchema:
"""Voluptuous schema for KNX exposures."""
CONF_KNX_EXPOSE_TYPE = CONF_TYPE
CONF_KNX_EXPOSE_ATTRIBUTE = "attribute"
CONF_KNX_EXPOSE_DEFAULT = "default"
CONF_KNX_EXPOSE_ADDRESS = CONF_ADDRESS
SCHEMA = vol.Schema(
{
vol.Required(CONF_KNX_EXPOSE_TYPE): vol.Any(int, float, str),
vol.Optional(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_KNX_EXPOSE_ATTRIBUTE): cv.string,
vol.Optional(CONF_KNX_EXPOSE_DEFAULT): cv.match_all,
vol.Required(CONF_KNX_EXPOSE_ADDRESS): cv.string,
}
)
class NotifySchema:
"""Voluptuous schema for KNX notifications."""
DEFAULT_NAME = "KNX Notify"
SCHEMA = vol.Schema(
{
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
class SensorSchema:
"""Voluptuous schema for KNX sensors."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_SYNC_STATE = CONF_SYNC_STATE
DEFAULT_NAME = "KNX Sensor"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): vol.Any(
vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
cv.boolean,
cv.string,
),
vol.Required(CONF_STATE_ADDRESS): cv.string,
vol.Required(CONF_TYPE): vol.Any(int, float, str),
}
)
class SceneSchema:
"""Voluptuous schema for KNX scenes."""
CONF_SCENE_NUMBER = "scene_number"
DEFAULT_NAME = "KNX SCENE"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_ADDRESS): cv.string,
vol.Required(CONF_SCENE_NUMBER): cv.positive_int,
}
)
class WeatherSchema:
"""Voluptuous schema for KNX weather station."""
CONF_SYNC_STATE = CONF_SYNC_STATE
CONF_KNX_TEMPERATURE_ADDRESS = "address_temperature"
CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS = "address_brightness_south"
CONF_KNX_BRIGHTNESS_EAST_ADDRESS = "address_brightness_east"
CONF_KNX_BRIGHTNESS_WEST_ADDRESS = "address_brightness_west"
CONF_KNX_WIND_SPEED_ADDRESS = "address_wind_speed"
CONF_KNX_RAIN_ALARM_ADDRESS = "address_rain_alarm"
CONF_KNX_FROST_ALARM_ADDRESS = "address_frost_alarm"
CONF_KNX_WIND_ALARM_ADDRESS = "address_wind_alarm"
CONF_KNX_DAY_NIGHT_ADDRESS = "address_day_night"
CONF_KNX_AIR_PRESSURE_ADDRESS = "address_air_pressure"
CONF_KNX_HUMIDITY_ADDRESS = "address_humidity"
CONF_KNX_EXPOSE_SENSORS = "expose_sensors"
DEFAULT_NAME = "KNX Weather Station"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): vol.Any(
vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
cv.boolean,
cv.string,
),
vol.Optional(CONF_KNX_EXPOSE_SENSORS, default=False): cv.boolean,
vol.Required(CONF_KNX_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS): cv.string,
vol.Optional(CONF_KNX_BRIGHTNESS_EAST_ADDRESS): cv.string,
vol.Optional(CONF_KNX_BRIGHTNESS_WEST_ADDRESS): cv.string,
vol.Optional(CONF_KNX_WIND_SPEED_ADDRESS): cv.string,
vol.Optional(CONF_KNX_RAIN_ALARM_ADDRESS): cv.string,
vol.Optional(CONF_KNX_FROST_ALARM_ADDRESS): cv.string,
vol.Optional(CONF_KNX_WIND_ALARM_ADDRESS): cv.string,
vol.Optional(CONF_KNX_DAY_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_KNX_AIR_PRESSURE_ADDRESS): cv.string,
vol.Optional(CONF_KNX_HUMIDITY_ADDRESS): cv.string,
}
)
| tchellomello/home-assistant | homeassistant/components/knx/schema.py | Python | apache-2.0 | 14,928 |
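As a usage note, each SCHEMA above is a callable voluptuous validator: calling it on a raw config dict returns the validated dict with defaults filled in, and raises vol.Invalid on bad input. A minimal illustration with made-up group addresses:

# Illustrative only; the group addresses are invented.
validated = SwitchSchema.SCHEMA(
    {
        "address": "1/0/1",
        "state_address": "1/0/2",
    }
)
assert validated[CONF_NAME] == "KNX Switch"  # default applied by voluptuous
# Invalid input raises vol.Invalid, e.g. omitting the required address:
# SwitchSchema.SCHEMA({"state_address": "1/0/2"})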
#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
| faskiri/barry2gugl | dn.py | Python | apache-2.0 | 3,915 |
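A short illustration of how the accumulating setters and csv() behave; all values below are hypothetical.

# Illustrative use of DN (hypothetical LDIF values).
entry = DN('cn=Jane Doe,dn')
entry.cn = 'Jane Doe'            # each setter *appends* to a list
entry.displayName = 'Jane Doe'
entry.givenName = 'Jane'
entry.sn = 'Doe'
entry.mail = 'jane@example.com'
entry.mail = 'jdoe@example.org'  # multi-valued attributes accumulate
print(entry)        # DN<dn=cn=Jane Doe, cn=['Jane Doe'], ...>
print(entry.csv())  # one CSV row; multiple values joined with ' ::: '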
#!/usr/bin/python
import re
userInput = raw_input("input equation\n").replace(" ", "")  # spaces are not significant
numCount = 0
operandCount = 0  # counts binary operators (+, -, *, /)
entryBracketCount = 0
exitBracketCount = 0
charCount = 0
endOfLine = len(userInput) - 1
for i in range(len(userInput)):
if (re.search('[a-zA-Z]', userInput[i])):
charCount = charCount + 1
print operandCount, " 1"
elif (re.search('[0-9]', userInput[i])):
numCount = numCount + 1
print operandCount, " 2"
elif (re.search('[-+*/]', userInput[i])):  # all four operators, '*' included
operandCount = operandCount + 1
print operandCount, " 3"
# if(re.search('[\s*\+|\s*\-|\s*\/]+', userInput[endOfLine])):
if(re.search('[-+*/]', userInput[endOfLine])):
print "invalid expression"
print "1"
exit(0)
else:
if (re.search('[a-zA-Z0-9)]', userInput[i - 1])):  # operand or ')' may precede an operator
continue
else:
print 'invalid expression'
print '2'
exit(0)
if(re.search('[\s*\d]+', userInput[i - 1])):
continue
else:
print 'invalid expression'
print '3'
exit(0)
if(re.search('[\s*a-zA-Z]+', userInput[i + 1])):
continue
elif(re.search('[\s*\d]+', userInput[i + 1])):
continue
elif (re.search('[\(]+', userInput[i + 1])):
continue
elif (re.search('[\)]+', userInput[i + 1])):
continue
else:
print 'invalid expression'
print '4'
exit(0)
elif (re.search('[\(]+', userInput[i])):
entryBracketCount = entryBracketCount + 1
print operandCount, " 4"
elif (re.search('[\)]+', userInput[i])):
exitBracketCount = exitBracketCount + 1
print operandCount, " 5"
if(re.search('[\)]+', userInput[endOfLine])):
continue
else:
if(re.search('[\(]+', userInput[i + 1])):
print 'invalid expression'
print '5'
exit(0)
print operandCount, " 6"
if (entryBracketCount != exitBracketCount):
print "invalid expression"
print '6'
exit(0)
elif operandCount == 0:
print operandCount
print "invalid expression"
print '7'
exit(0)
elif ((numCount == 0) and (charCount == 0)):
print "invalid expression"
print '8'
exit(0)
else:
print "valid expression"
| dominickhera/PosaRepo | cis3250labs/parseTest.py | Python | apache-2.0 | 2,244 |
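For contrast, roughly the same surface rules (operands are letters or digits, at least one binary operator, each operator flanked by operands, parentheses balanced) fit in a few lines. This is an editor's sketch, not part of the original lab submission, and it is slightly more permissive about parenthesis placement than the script above.

# Editor's sketch; not part of the lab code.
import re

def is_valid_expression(expr):
    expr = expr.replace(' ', '')
    if expr.count('(') != expr.count(')'):
        return False
    operand = r'\(*[A-Za-z0-9]+\)*'  # operand with optional parentheses
    # Require at least one operator, each with operands on both sides.
    return re.match(r'^%s([-+*/]%s)+$' % (operand, operand), expr) is not None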
"""
Support for KNX/IP climate devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.knx/
"""
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import (
PLATFORM_SCHEMA, SUPPORT_ON_OFF, SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE, STATE_HEAT,
STATE_IDLE, STATE_MANUAL, STATE_DRY,
STATE_FAN_ONLY, STATE_ECO, ClimateDevice)
from homeassistant.const import (
ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS)
from homeassistant.core import callback
from homeassistant.components.knx import DATA_KNX, ATTR_DISCOVER_DEVICES
CONF_SETPOINT_SHIFT_ADDRESS = 'setpoint_shift_address'
CONF_SETPOINT_SHIFT_STATE_ADDRESS = 'setpoint_shift_state_address'
CONF_SETPOINT_SHIFT_STEP = 'setpoint_shift_step'
CONF_SETPOINT_SHIFT_MAX = 'setpoint_shift_max'
CONF_SETPOINT_SHIFT_MIN = 'setpoint_shift_min'
CONF_TEMPERATURE_ADDRESS = 'temperature_address'
CONF_TARGET_TEMPERATURE_ADDRESS = 'target_temperature_address'
CONF_OPERATION_MODE_ADDRESS = 'operation_mode_address'
CONF_OPERATION_MODE_STATE_ADDRESS = 'operation_mode_state_address'
CONF_CONTROLLER_STATUS_ADDRESS = 'controller_status_address'
CONF_CONTROLLER_STATUS_STATE_ADDRESS = 'controller_status_state_address'
CONF_CONTROLLER_MODE_ADDRESS = 'controller_mode_address'
CONF_CONTROLLER_MODE_STATE_ADDRESS = 'controller_mode_state_address'
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = \
'operation_mode_frost_protection_address'
CONF_OPERATION_MODE_NIGHT_ADDRESS = 'operation_mode_night_address'
CONF_OPERATION_MODE_COMFORT_ADDRESS = 'operation_mode_comfort_address'
CONF_OPERATION_MODES = 'operation_modes'
CONF_ON_OFF_ADDRESS = 'on_off_address'
CONF_ON_OFF_STATE_ADDRESS = 'on_off_state_address'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
DEFAULT_NAME = 'KNX Climate'
DEFAULT_SETPOINT_SHIFT_STEP = 0.5
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEPENDENCIES = ['knx']
# Map KNX operation modes to HA modes. This mapping may be incomplete.
OPERATION_MODES = {
# Map DPT 201.100 HVAC operating modes
"Frost Protection": STATE_MANUAL,
"Night": STATE_IDLE,
"Standby": STATE_ECO,
"Comfort": STATE_HEAT,
# Map DPT 201.104 HVAC control modes
"Fan only": STATE_FAN_ONLY,
"Dehumidification": STATE_DRY
}
OPERATION_MODES_INV = dict((
reversed(item) for item in OPERATION_MODES.items()))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
vol.Required(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STEP,
default=DEFAULT_SETPOINT_SHIFT_STEP): vol.All(
float, vol.Range(min=0, max=2)),
vol.Optional(CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX):
vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN):
vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODES): vol.All(cv.ensure_list,
[vol.In(OPERATION_MODES)]),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
})
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up climate(s) for KNX platform."""
if discovery_info is not None:
async_add_entities_discovery(hass, discovery_info, async_add_entities)
else:
async_add_entities_config(hass, config, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
"""Set up climates for KNX platform configured within platform."""
entities = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
entities.append(KNXClimate(device))
async_add_entities(entities)
@callback
def async_add_entities_config(hass, config, async_add_entities):
"""Set up climate for KNX platform configured within platform."""
import xknx
climate_mode = xknx.devices.ClimateMode(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME) + " Mode",
group_address_operation_mode=config.get(CONF_OPERATION_MODE_ADDRESS),
group_address_operation_mode_state=config.get(
CONF_OPERATION_MODE_STATE_ADDRESS),
group_address_controller_status=config.get(
CONF_CONTROLLER_STATUS_ADDRESS),
group_address_controller_status_state=config.get(
CONF_CONTROLLER_STATUS_STATE_ADDRESS),
group_address_controller_mode=config.get(
CONF_CONTROLLER_MODE_ADDRESS),
group_address_controller_mode_state=config.get(
CONF_CONTROLLER_MODE_STATE_ADDRESS),
group_address_operation_mode_protection=config.get(
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS),
group_address_operation_mode_night=config.get(
CONF_OPERATION_MODE_NIGHT_ADDRESS),
group_address_operation_mode_comfort=config.get(
CONF_OPERATION_MODE_COMFORT_ADDRESS),
operation_modes=config.get(
CONF_OPERATION_MODES))
hass.data[DATA_KNX].xknx.devices.add(climate_mode)
climate = xknx.devices.Climate(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME),
group_address_temperature=config.get(CONF_TEMPERATURE_ADDRESS),
group_address_target_temperature=config.get(
CONF_TARGET_TEMPERATURE_ADDRESS),
group_address_setpoint_shift=config.get(CONF_SETPOINT_SHIFT_ADDRESS),
group_address_setpoint_shift_state=config.get(
CONF_SETPOINT_SHIFT_STATE_ADDRESS),
setpoint_shift_step=config.get(CONF_SETPOINT_SHIFT_STEP),
setpoint_shift_max=config.get(CONF_SETPOINT_SHIFT_MAX),
setpoint_shift_min=config.get(CONF_SETPOINT_SHIFT_MIN),
group_address_on_off=config.get(
CONF_ON_OFF_ADDRESS),
group_address_on_off_state=config.get(
CONF_ON_OFF_STATE_ADDRESS),
min_temp=config.get(CONF_MIN_TEMP),
max_temp=config.get(CONF_MAX_TEMP),
mode=climate_mode)
hass.data[DATA_KNX].xknx.devices.add(climate)
async_add_entities([KNXClimate(climate)])
class KNXClimate(ClimateDevice):
"""Representation of a KNX climate device."""
def __init__(self, device):
"""Initialize of a KNX climate device."""
self.device = device
self._unit_of_measurement = TEMP_CELSIUS
@property
def supported_features(self):
"""Return the list of supported features."""
support = SUPPORT_TARGET_TEMPERATURE
if self.device.mode.supports_operation_mode:
support |= SUPPORT_OPERATION_MODE
if self.device.supports_on_off:
support |= SUPPORT_ON_OFF
return support
async def async_added_to_hass(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
await self.async_update_ha_state()
self.device.register_device_updated_cb(after_update_callback)
@property
def name(self):
"""Return the name of the KNX device."""
return self.device.name
@property
def available(self):
"""Return True if entity is available."""
return self.hass.data[DATA_KNX].connected
@property
def should_poll(self):
"""No polling needed within KNX."""
return False
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self.device.temperature.value
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.device.setpoint_shift_step
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.device.target_temperature.value
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.device.target_temperature_min
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.device.target_temperature_max
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
await self.device.set_target_temperature(temperature)
await self.async_update_ha_state()
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if self.device.mode.supports_operation_mode:
return OPERATION_MODES.get(self.device.mode.operation_mode.value)
return None
@property
def operation_list(self):
"""Return the list of available operation modes."""
return [OPERATION_MODES.get(operation_mode.value) for
operation_mode in
self.device.mode.operation_modes]
async def async_set_operation_mode(self, operation_mode):
"""Set operation mode."""
if self.device.mode.supports_operation_mode:
from xknx.knx import HVACOperationMode
knx_operation_mode = HVACOperationMode(
OPERATION_MODES_INV.get(operation_mode))
await self.device.mode.set_operation_mode(knx_operation_mode)
await self.async_update_ha_state()
@property
def is_on(self):
"""Return true if the device is on."""
if self.device.supports_on_off:
return self.device.is_on
return None
async def async_turn_on(self):
"""Turn on."""
await self.device.turn_on()
async def async_turn_off(self):
"""Turn off."""
await self.device.turn_off()
| PetePriority/home-assistant | homeassistant/components/knx/climate.py | Python | apache-2.0 | 11,010 |
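For reference, a configuration accepted by PLATFORM_SCHEMA above would look like this in Home Assistant's configuration.yaml; the group addresses are invented for the example.

# Example configuration.yaml entry (shown as comments; addresses invented):
#
# climate:
#   - platform: knx
#     name: Kitchen thermostat
#     temperature_address: '5/1/1'
#     target_temperature_address: '5/1/2'
#     setpoint_shift_address: '5/1/3'
#     setpoint_shift_state_address: '5/1/4'
#     operation_mode_address: '5/1/5'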
"""
WSGI config for comic project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(SITE_ROOT)
sys.path.append(os.path.join(SITE_ROOT,"comic"))
os.environ.setdefault("PYTHON_EGG_CACHE", "/tmp/")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "comic.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| cpatrick/comic-django | django/comic/wsgi.py | Python | apache-2.0 | 1,360 |
import httplib
from pyamf import AMF0, AMF3
from pyamf import remoting
from pyamf.remoting.client import RemotingService
height = 1080
def build_amf_request(const, playerID, videoPlayer, publisherID):
env = remoting.Envelope(amfVersion=3)
env.bodies.append(
(
"/1",
remoting.Request(
target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById",
body=[const, playerID, videoPlayer, publisherID],
envelope=env
)
)
)
return env
def get_clip_info(const, playerID, videoPlayer, publisherID, playerKey):
conn = httplib.HTTPConnection("c.brightcove.com")
envelope = build_amf_request(const, playerID, videoPlayer, publisherID)
conn.request("POST", "/services/messagebroker/amf?playerKey=" + playerKey, str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
response = conn.getresponse().read()
response = remoting.decode(response).bodies[0][1].body
return response
def play(const, playerID, videoPlayer, publisherID, playerKey):
rtmpdata = get_clip_info(const, playerID, videoPlayer, publisherID, playerKey)
streamName = ""
streamUrl = rtmpdata['FLVFullLengthURL']
for item in sorted(rtmpdata['renditions'], key=lambda item:item['frameHeight'], reverse=False):
streamHeight = item['frameHeight']
if streamHeight <= height:
streamUrl = item['defaultURL']
streamName = rtmpdata['displayName']
return [streamName, streamUrl]
| aplicatii-romanesti/allinclusive-kodi-pi | .kodi/addons/plugin.video.kidsplace/brightcovePlayer.py | Python | apache-2.0 | 1,587 |
"""
Overview of all settings which can be customized.
"""
from django.conf import settings
from parler import appsettings as parler_appsettings
FLUENT_CONTENTS_CACHE_OUTPUT = getattr(settings, 'FLUENT_CONTENTS_CACHE_OUTPUT', True)
FLUENT_CONTENTS_PLACEHOLDER_CONFIG = getattr(settings, 'FLUENT_CONTENTS_PLACEHOLDER_CONFIG', {})
# Note: the default language setting is used during the migrations
FLUENT_DEFAULT_LANGUAGE_CODE = getattr(settings, 'FLUENT_DEFAULT_LANGUAGE_CODE', parler_appsettings.PARLER_DEFAULT_LANGUAGE_CODE)
FLUENT_CONTENTS_DEFAULT_LANGUAGE_CODE = getattr(settings, 'FLUENT_CONTENTS_DEFAULT_LANGUAGE_CODE', FLUENT_DEFAULT_LANGUAGE_CODE)
| pombredanne/django-fluent-contents | fluent_contents/appsettings.py | Python | apache-2.0 | 658 |
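For illustration, a project overrides these in its settings.py. The placeholder-config shape below follows django-fluent-contents conventions, but the slot and plugin names are invented.

# Example overrides in a project's settings.py (illustrative values only).
FLUENT_CONTENTS_CACHE_OUTPUT = False
FLUENT_CONTENTS_DEFAULT_LANGUAGE_CODE = 'en'
FLUENT_CONTENTS_PLACEHOLDER_CONFIG = {
    'blog_contents': {               # hypothetical slot name
        'plugins': ('TextPlugin',),  # restrict which plugins this slot allows
    },
}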
# File permute.py
def permute1(seq):
if not seq: # Shuffle any sequence: list
return [seq] # Empty sequence
else:
res = []
for i in range(len(seq)):
rest = seq[:i] + seq[i+1:] # Delete current node
for x in permute1(rest): # Permute the others
res.append(seq[i:i+1] + x) # Add node at front
return res
def permute2(seq):
if not seq: # Shuffle any sequence: generator
yield seq # Empty sequence
else:
for i in range(len(seq)):
rest = seq[:i] + seq[i+1:] # Delete current node
for x in permute2(rest): # Permute the others
yield seq[i:i+1] + x # Add node at front
| dreadrel/UWF_2014_spring_COP3990C-2507 | notebooks/scripts/book_code/code/permute.py | Python | apache-2.0 | 865 |
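A quick demonstration of the difference: permute1 materializes every permutation up front, while permute2 yields them one at a time and can be consumed lazily.

# Demonstration: both variants produce the same permutations.
if __name__ == '__main__':
    print(permute1('abc'))   # ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
    gen = permute2('abc')
    print(next(gen))         # 'abc' -- computed on demand
    print(list(gen))         # the remaining five permutations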
from __future__ import absolute_import, unicode_literals
import django
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .managers import QueueManager, MessageManager
class Queue(models.Model):
name = models.CharField(_('name'), max_length=200, unique=True)
objects = QueueManager()
class Meta:
if django.VERSION >= (1, 7):
app_label = 'karellen_kombu_transport_django'
db_table = 'djkombu_queue'
verbose_name = _('queue')
verbose_name_plural = _('queues')
class Message(models.Model):
visible = models.BooleanField(default=True, db_index=True)
sent_at = models.DateTimeField(null=True, blank=True, db_index=True,
auto_now_add=True)
payload = models.TextField(_('payload'), null=False)
queue = models.ForeignKey(Queue, related_name='messages')
objects = MessageManager()
class Meta:
if django.VERSION >= (1, 7):
app_label = 'karellen_kombu_transport_django'
db_table = 'djkombu_message'
verbose_name = _('message')
verbose_name_plural = _('messages')
| arcivanov/karellen-kombu-ext | src/main/python/karellen/kombu/transport/django/models.py | Python | apache-2.0 | 1,161 |
from givabit.backend.charity import Charity
from givabit.backend.errors import MissingValueException, MultipleValueException
from givabit.test_common import test_data
from givabit.test_common import test_utils
class CharityRepositoryTest(test_utils.TestCase):
def setUp(self):
super(CharityRepositoryTest, self).setUp()
self.all_charities = [test_data.c1, test_data.c2, test_data.c3, test_data.c4]
for charity in self.all_charities:
self.charity_repo.add_or_update_charity(charity)
def test_lists_charities(self):
self.assertSequenceEqual(self.charity_repo.list_charities(), self.all_charities)
def test_gets_single_charity(self):
self.assertEqual(self.charity_repo.get_charity('Shelter'), test_data.c1)
self.assertEqual(self.charity_repo.get_charity('Oxfam'), test_data.c2)
with self.assertRaises(MissingValueException):
self.charity_repo.get_charity('Does not exist')
try:
self.charity_repo.get_charity('BHF')
self.fail('expected MultipleValueException')
except MultipleValueException, e:
self.assertSequenceEqual(e.values, [test_data.c3, test_data.c4])
def test_gets_charity_by_id(self):
self.assertEquals(self.charity_repo.get_charity(id=test_data.c1.key().id()), test_data.c1)
def test_getting_missing_charity_by_id_throws(self):
missing_id = 0
while missing_id in map(lambda charity: charity.key().id(), self.all_charities):
missing_id += 1
with self.assertRaises(MissingValueException):
self.charity_repo.get_charity(id=missing_id)
| illicitonion/givabit | src/givabit/backend/charity_repository_test.py | Python | apache-2.0 | 1,597 |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from google.appengine.ext import db
import mc_unittest
from rogerthat.models import CompressedIntegerListExpando
class TestCase(mc_unittest.TestCase):
l = [1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1]
def setUp(self, datastore_hr_probability=0):
mc_unittest.TestCase.setUp(self, datastore_hr_probability=datastore_hr_probability)
class MyModel(db.Expando):
pass
m = MyModel(key_name='test')
m.test = TestCase.l
m.put()
def test_get_custom_prop(self):
class MyModel(CompressedIntegerListExpando):
_attribute_prefix = 'test'
m = MyModel.get_by_key_name('test')
self.assertListEqual(TestCase.l, m.test)
dict_repr = db.to_dict(m)
self.assertTrue(isinstance(dict_repr['test'], basestring))
def test_append(self):
class MyModel(CompressedIntegerListExpando):
_attribute_prefix = 'test'
m = MyModel.get_by_key_name('test')
m.test.append(5)
m.put()
m = MyModel.get_by_key_name('test')
self.assertListEqual(TestCase.l + [5], m.test)
def test_ljust(self):
class MyModel(CompressedIntegerListExpando):
_attribute_prefix = 'test'
m = MyModel.get_by_key_name('test')
print 'Before: %r' % m.test
m.test.ljust(5, 0, 10) # will append 5 zeroes, and limit the number of entries to 10
print 'After: %r' % m.test
expected = (TestCase.l + 5 * [0])[-10:] # [1, 0, 0, 0, 1, 0, 0, 0, 0, 0]
print 'Expected: %r' % expected
m.put()
m = MyModel.get_by_key_name('test')
self.assertListEqual(expected, m.test)
| rogerthat-platform/rogerthat-backend | src-test/rogerthat_tests/mobicage/models/test_compressed_int_list.py | Python | apache-2.0 | 2,303 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import collections
from contextlib import contextmanager
import pytest
import six
from cryptography.exceptions import UnsupportedAlgorithm
import cryptography_vectors
HashVector = collections.namedtuple("HashVector", ["message", "digest"])
KeyedHashVector = collections.namedtuple(
"KeyedHashVector", ["message", "digest", "key"]
)
def select_backends(names, backend_list):
if names is None:
return backend_list
split_names = [x.strip() for x in names.split(',')]
# this must be duplicated and then removed to preserve the metadata
# pytest associates. Appending backends to a new list doesn't seem to work
selected_backends = []
for backend in backend_list:
if backend.name in split_names:
selected_backends.append(backend)
if len(selected_backends) > 0:
return selected_backends
else:
raise ValueError(
"No backend selected. Tried to select: {0}".format(split_names)
)
def check_for_iface(name, iface, item):
if name in item.keywords and "backend" in item.funcargs:
if not isinstance(item.funcargs["backend"], iface):
pytest.skip("{0} backend does not support {1}".format(
item.funcargs["backend"], name
))
def check_backend_support(item):
supported = item.keywords.get("supported")
if supported and "backend" in item.funcargs:
if not supported.kwargs["only_if"](item.funcargs["backend"]):
pytest.skip("{0} ({1})".format(
supported.kwargs["skip_message"], item.funcargs["backend"]
))
elif supported:
raise ValueError("This mark is only available on methods that take a "
"backend")
@contextmanager
def raises_unsupported_algorithm(reason):
with pytest.raises(UnsupportedAlgorithm) as exc_info:
yield exc_info
assert exc_info.value._reason is reason
def load_vectors_from_file(filename, loader):
with cryptography_vectors.open_vector_file(filename) as vector_file:
return loader(vector_file)
def load_nist_vectors(vector_data):
test_data = None
data = []
for line in vector_data:
line = line.strip()
# Blank lines, comments, and section headers are ignored
if not line or line.startswith("#") or (line.startswith("[")
and line.endswith("]")):
continue
if line.strip() == "FAIL":
test_data["fail"] = True
continue
# Build our data using a simple Key = Value format
name, value = [c.strip() for c in line.split("=")]
# Some tests (PBKDF2) contain \0, which should be interpreted as a
# null character rather than a literal backslash-zero.
value = value.replace("\\0", "\0")
# COUNT is a special token that indicates a new block of data
if name.upper() == "COUNT":
test_data = {}
data.append(test_data)
continue
# For all other tokens we simply want the name, value stored in
# the dictionary
else:
test_data[name.lower()] = value.encode("ascii")
return data
def load_cryptrec_vectors(vector_data):
cryptrec_list = []
for line in vector_data:
line = line.strip()
# Blank lines and comments are ignored
if not line or line.startswith("#"):
continue
if line.startswith("K"):
key = line.split(" : ")[1].replace(" ", "").encode("ascii")
elif line.startswith("P"):
pt = line.split(" : ")[1].replace(" ", "").encode("ascii")
elif line.startswith("C"):
ct = line.split(" : ")[1].replace(" ", "").encode("ascii")
# after a C is found the K+P+C tuple is complete
# there are many P+C pairs for each K
cryptrec_list.append({
"key": key,
"plaintext": pt,
"ciphertext": ct
})
else:
raise ValueError("Invalid line in file '{}'".format(line))
return cryptrec_list
def load_hash_vectors(vector_data):
vectors = []
key = None
msg = None
md = None
for line in vector_data:
line = line.strip()
if not line or line.startswith("#") or line.startswith("["):
continue
if line.startswith("Len"):
length = int(line.split(" = ")[1])
elif line.startswith("Key"):
# HMAC vectors contain a key attribute. Hash vectors do not.
key = line.split(" = ")[1].encode("ascii")
elif line.startswith("Msg"):
# In the NIST vectors they have chosen to represent an empty
# string as hex 00, which is of course not actually an empty
# string. So we parse the provided length and catch this edge case.
msg = line.split(" = ")[1].encode("ascii") if length > 0 else b""
elif line.startswith("MD"):
md = line.split(" = ")[1]
# after MD is found the Msg+MD (+ potential key) tuple is complete
if key is not None:
vectors.append(KeyedHashVector(msg, md, key))
key = None
msg = None
md = None
else:
vectors.append(HashVector(msg, md))
msg = None
md = None
else:
raise ValueError("Unknown line in hash vector")
return vectors
def load_pkcs1_vectors(vector_data):
"""
Loads data out of RSA PKCS #1 vector files.
"""
private_key_vector = None
public_key_vector = None
attr = None
key = None
example_vector = None
examples = []
vectors = []
for line in vector_data:
if (
line.startswith("# PSS Example") or
line.startswith("# PKCS#1 v1.5 Signature")
):
if example_vector:
for key, value in six.iteritems(example_vector):
hex_str = "".join(value).replace(" ", "").encode("ascii")
example_vector[key] = hex_str
examples.append(example_vector)
attr = None
example_vector = collections.defaultdict(list)
if line.startswith("# Message to be signed"):
attr = "message"
continue
elif line.startswith("# Salt"):
attr = "salt"
continue
elif line.startswith("# Signature"):
attr = "signature"
continue
elif (
example_vector and
line.startswith("# =============================================")
):
for key, value in six.iteritems(example_vector):
hex_str = "".join(value).replace(" ", "").encode("ascii")
example_vector[key] = hex_str
examples.append(example_vector)
example_vector = None
attr = None
elif example_vector and line.startswith("#"):
continue
else:
if attr is not None and example_vector is not None:
example_vector[attr].append(line.strip())
continue
if (
line.startswith("# Example") or
line.startswith("# =============================================")
):
if key:
assert private_key_vector
assert public_key_vector
for key, value in six.iteritems(public_key_vector):
hex_str = "".join(value).replace(" ", "")
public_key_vector[key] = int(hex_str, 16)
for key, value in six.iteritems(private_key_vector):
hex_str = "".join(value).replace(" ", "")
private_key_vector[key] = int(hex_str, 16)
private_key_vector["examples"] = examples
examples = []
assert (
private_key_vector['public_exponent'] ==
public_key_vector['public_exponent']
)
assert (
private_key_vector['modulus'] ==
public_key_vector['modulus']
)
vectors.append(
(private_key_vector, public_key_vector)
)
public_key_vector = collections.defaultdict(list)
private_key_vector = collections.defaultdict(list)
key = None
attr = None
if private_key_vector is None or public_key_vector is None:
continue
if line.startswith("# Private key"):
key = private_key_vector
elif line.startswith("# Public key"):
key = public_key_vector
elif line.startswith("# Modulus:"):
attr = "modulus"
elif line.startswith("# Public exponent:"):
attr = "public_exponent"
elif line.startswith("# Exponent:"):
if key is public_key_vector:
attr = "public_exponent"
else:
assert key is private_key_vector
attr = "private_exponent"
elif line.startswith("# Prime 1:"):
attr = "p"
elif line.startswith("# Prime 2:"):
attr = "q"
elif line.startswith("# Prime exponent 1:"):
attr = "dmp1"
elif line.startswith("# Prime exponent 2:"):
attr = "dmq1"
elif line.startswith("# Coefficient:"):
attr = "iqmp"
elif line.startswith("#"):
attr = None
else:
if key is not None and attr is not None:
key[attr].append(line.strip())
return vectors
def load_rsa_nist_vectors(vector_data):
test_data = None
p = None
salt_length = None
data = []
for line in vector_data:
line = line.strip()
# Blank lines and section headers are ignored
if not line or line.startswith("["):
continue
if line.startswith("# Salt len:"):
salt_length = int(line.split(":")[1].strip())
continue
elif line.startswith("#"):
continue
# Build our data using a simple Key = Value format
name, value = [c.strip() for c in line.split("=")]
if name == "n":
n = int(value, 16)
elif name == "e" and p is None:
e = int(value, 16)
elif name == "p":
p = int(value, 16)
elif name == "q":
q = int(value, 16)
elif name == "SHAAlg":
if p is None:
test_data = {
"modulus": n,
"public_exponent": e,
"salt_length": salt_length,
"algorithm": value,
"fail": False
}
else:
test_data = {
"modulus": n,
"p": p,
"q": q,
"algorithm": value
}
if salt_length is not None:
test_data["salt_length"] = salt_length
data.append(test_data)
elif name == "e" and p is not None:
test_data["public_exponent"] = int(value, 16)
elif name == "d":
test_data["private_exponent"] = int(value, 16)
elif name == "Result":
test_data["fail"] = value.startswith("F")
# For all other tokens we simply want the name, value stored in
# the dictionary
else:
test_data[name.lower()] = value.encode("ascii")
return data
def load_fips_dsa_key_pair_vectors(vector_data):
"""
Loads data out of the FIPS DSA KeyPair vector files.
"""
vectors = []
# When reading_key_data is set to True it tells the loader to continue
# constructing dictionaries. We set reading_key_data to False during the
# blocks of the vectors of N=224 because we don't support it.
reading_key_data = True
for line in vector_data:
line = line.strip()
if not line or line.startswith("#"):
continue
elif line.startswith("[mod = L=1024"):
continue
elif line.startswith("[mod = L=2048, N=224"):
reading_key_data = False
continue
elif line.startswith("[mod = L=2048, N=256"):
reading_key_data = True
continue
elif line.startswith("[mod = L=3072"):
continue
if not reading_key_data:
continue
elif reading_key_data:
if line.startswith("P"):
vectors.append({'p': int(line.split("=")[1], 16)})
elif line.startswith("Q"):
vectors[-1]['q'] = int(line.split("=")[1], 16)
elif line.startswith("G"):
vectors[-1]['g'] = int(line.split("=")[1], 16)
elif line.startswith("X") and 'x' not in vectors[-1]:
vectors[-1]['x'] = int(line.split("=")[1], 16)
elif line.startswith("X") and 'x' in vectors[-1]:
vectors.append({'p': vectors[-1]['p'],
'q': vectors[-1]['q'],
'g': vectors[-1]['g'],
'x': int(line.split("=")[1], 16)
})
elif line.startswith("Y"):
vectors[-1]['y'] = int(line.split("=")[1], 16)
return vectors
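# Illustrative sketch of the KeyPair.rsp structure handled above (hex values
# shortened and made up). Each X/Y pair after the shared P/Q/G becomes one
# dict; a second X line would re-use the previous p/q/g, which is why the
# loader copies them forward.
_EXAMPLE_DSA_LINES = [
    "[mod = L=2048, N=256, SHA-256]",
    "P = d38311e2",
    "Q = baf696a6",
    "G = 16a65c58",
    "X = 8185fee9",
    "Y = 6f26d98d",
]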
| Lukasa/cryptography | tests/utils.py | Python | apache-2.0 | 14,122 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from semi_supervised_domain_adaptation.lib.data import MixData, CTAData
from semi_supervised_domain_adaptation.lib.train import TrainableSSDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class Baseline(TrainableSSDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu):
c, h, w = sx.shape[-3:]
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx = jn.split(logit, (2 * sx.shape[0],))[0]
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wd * wd
return loss, {'losses/xe': xe, 'losses/wd': wd}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tx, ty, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(jn.concatenate((sx, tx)), jn.concatenate((sy, ty)), tu)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
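# A note on the labeled-batch layout assumed by loss_function above (shapes
# are an assumption for illustration, not taken from the data pipeline): sx
# is expected to carry two augmented views per labeled image, e.g.
# sx.shape == (batch, 2, c, h, w), so after the (-1, c, h, w) reshape the
# even rows of logit_sx are the weak views and the odd rows the strong ones,
# matching the logit_sx[::2] / logit_sx[1::2] split.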
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target_name, target_samples_per_class, target_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}_{FLAGS.target}')
target_labeled = SSL_DATASETS()[target_name](target_samples_per_class, target_seed)
target_unlabeled = FSL_DATASETS()[f'{target_name}-0']()
testsets = [target_unlabeled.test, source.test] # Ordered by domain (unlabeled always first)
module = Baseline(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
uratio=FLAGS.uratio)
logdir = f'SSDA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Source data to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph(10,seed=1)', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
| google-research/adamatch | semi_supervised_domain_adaptation/baseline.py | Python | apache-2.0 | 7,016 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.jira_hook import JiraHook
from airflow import models
from airflow.utils import db
jira_client_mock = Mock(
name="jira_client"
)
class TestJiraHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='jira_default', conn_type='jira',
host='https://localhost/jira/', port=443,
extra='{"verify": "False", "project": "AIRFLOW"}'))
@patch("airflow.contrib.hooks.jira_hook.JIRA", autospec=True,
return_value=jira_client_mock)
def test_jira_client_connection(self, jira_mock):
jira_hook = JiraHook()
self.assertTrue(jira_mock.called)
self.assertIsInstance(jira_hook.client, Mock)
self.assertEqual(jira_hook.client.name, jira_mock.return_value.name)
if __name__ == '__main__':
unittest.main()
| akosel/incubator-airflow | tests/contrib/hooks/test_jira_hook.py | Python | apache-2.0 | 1,861 |
import sys
import click
from solar.core import testing
from solar.core import resource
from solar.system_log import change
from solar.system_log import operations
from solar.system_log import data
from solar.cli.uids_history import get_uid, remember_uid, SOLARUID
@click.group()
def changes():
pass
@changes.command()
def validate():
errors = resource.validate_resources()
if errors:
for r, error in errors:
print 'ERROR: %s: %s' % (r.name, error)
sys.exit(1)
@changes.command()
@click.option('-d', default=False, is_flag=True)
def stage(d):
log = list(change.stage_changes().reverse())
for item in log:
click.echo(item)
if d:
for line in item.details:
click.echo(' '*4+line)
if not log:
click.echo('No changes')
@changes.command(name='staged-item')
@click.argument('log_action')
def staged_item(log_action):
item = data.SL().get(log_action)
if not item:
click.echo('No staged changes for {}'.format(log_action))
else:
click.echo(item)
for line in item.details:
click.echo(' '*4+line)
@changes.command()
def process():
uid = change.send_to_orchestration()
remember_uid(uid)
click.echo(uid)
@changes.command()
@click.argument('uid', type=SOLARUID)
def commit(uid):
operations.commit(uid)
@changes.command()
@click.option('-n', default=5)
def history(n):
commited = list(data.CL().collection(n))
if not commited:
click.echo('No history.')
return
commited.reverse()
click.echo(commited)
@changes.command()
def test():
results = testing.test_all()
for name, result in results.items():
msg = '[{status}] {name} {message}'
kwargs = {
'name': name,
'message': '',
'status': 'OK',
}
if result['status'] == 'ok':
kwargs['status'] = click.style('OK', fg='green')
else:
kwargs['status'] = click.style('ERROR', fg='red')
kwargs['message'] = result['message']
click.echo(msg.format(**kwargs))
@changes.command(name='clean-history')
def clean_history():
data.CL().clean()
data.CD().clean()
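# Example session (hypothetical entry point name "solar"; only the "changes"
# group above is defined here):
#
#   solar changes validate          # fail fast on resource errors
#   solar changes stage -d          # show staged changes with details
#   solar changes process           # prints an orchestration uid
#   solar changes commit <uid>      # uid resolved via uids_history.SOLARUID
#   solar changes history -n 10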
| dshulyak/solar | solar/solar/cli/system_log.py | Python | apache-2.0 | 2,234 |
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from os.path import basename, splitext
import codecs
from robot.htmldata import HtmlFileWriter, ModelWriter, LOG, REPORT
from robot.utils import utf8open
from .jswriter import JsResultWriter, SplitLogWriter
class _LogReportWriter(object):
def __init__(self, js_model):
self._js_model = js_model
def _write_file(self, path, config, template):
outfile = codecs.open(path, 'wb', encoding='UTF-8')\
if isinstance(path, basestring) else path # unit test hook
with outfile:
model_writer = RobotModelWriter(outfile, self._js_model, config)
writer = HtmlFileWriter(outfile, model_writer)
writer.write(template)
class RobotModelWriter(ModelWriter):
def __init__(self, output, model, config):
self._output = output
self._model = model
self._config = config
def write(self, line):
JsResultWriter(self._output).write(self._model, self._config)
class LogWriter(_LogReportWriter):
def write(self, path, config):
self._write_file(path, config, LOG)
if self._js_model.split_results:
self._write_split_logs(splitext(path)[0])
def _write_split_logs(self, base):
for index, (keywords, strings) in enumerate(self._js_model.split_results):
index += 1 # enumerate accepts start index only in Py 2.6+
self._write_split_log(index, keywords, strings, '%s-%d.js' % (base, index))
def _write_split_log(self, index, keywords, strings, path):
with utf8open(path, 'wb') as outfile:
writer = SplitLogWriter(outfile)
writer.write(keywords, strings, index, basename(path))
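# Illustrative naming (assumed inputs): writing a log to '/tmp/log.html' with
# two split results produces '/tmp/log-1.js' and '/tmp/log-2.js', since
# LogWriter.write passes splitext(path)[0] as the base.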
class ReportWriter(_LogReportWriter):
def write(self, path, config):
self._write_file(path, config, REPORT)
| Senseg/robotframework | src/robot/reporting/logreportwriters.py | Python | apache-2.0 | 2,447 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Contains the logic for `aq del cluster systemlist --hostname`. """
from aquilon.aqdb.model import SystemList
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.del_cluster_member_priority import \
CommandDelClusterMemberPriority
class CommandDelClusterSystemList(CommandDelClusterMemberPriority):
required_parameters = ["cluster", "hostname"]
resource_class = SystemList
def render(self, hostname, **kwargs):
super(CommandDelClusterSystemList, self).render(hostname=None,
metacluster=None,
comments=None,
member=hostname,
**kwargs)
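# Illustrative invocation (cluster and hostname are hypothetical); the class
# above maps the two-argument form onto the generic member-priority deletion:
#
#   aq del cluster systemlist --cluster grid1 --hostname evh1.example.com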
| quattor/aquilon | lib/aquilon/worker/commands/del_cluster_systemlist.py | Python | apache-2.0 | 1,563 |
#!/usr/bin/env python
from __future__ import with_statement
import argparse
import sys
import logging
import urllib, urllib2
import json
from fabric.operations import local
from fabric.api import hide
import yaml
VERSION = "0.0.1"
SERVER_FILE = ".server"
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def get_repo_info():
with hide('commands'):
f_out = local('git remote -v|grep push|grep origin', capture = True)
remote_git = ""
start = f_out.find("http")
end = f_out.find(".git")
remote_git = f_out[start:end]
repo_name = remote_git[remote_git.rfind('/')+1:]
return repo_name
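# Worked example (hypothetical remote): given
#   origin  https://github.com/acme/nursery.git (push)
# the slice between "http" and ".git" yields "https://github.com/acme/nursery",
# and everything after the last "/" gives repo_name == "nursery".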
def get_current_branch():
with hide('commands'):
f_out = local('git branch', capture = True)
start = f_out.find('* ')
end = f_out.find('\n')
branch = f_out[start+2:end]
return branch
def get_last_hash():
with hide('commands'):
f_out = local('git rev-parse HEAD', capture = True)
start = 0
end = f_out.find('\n')
branch = f_out[start:end]
return branch
class Server(object):
def __init__(self):
try:
with open(".server") as f:
self.address = f.readlines()[0]
self.repo = get_repo_info()
self.current_branch = get_current_branch()
ok = self.post_to_server('info')
logging.debug("endpoint: %s" % (ok))
except IOError:
self.address = None
def parse_yaml(self,yaml_file):
try:
data = yaml.load(yaml_file.read())
if data is not None:
return data
return False
except Exception as e:
logging.error(e)
return False
""" Run a normal client deployment """
def deploy(self, git_hash = None):
if git_hash is None:
git_hash = get_last_hash()
deploy = {'hash': git_hash, 'branch': get_current_branch()}
req = self.post_to_server("deploy", deploy)
result = json.loads(req)
self.parse_server_response(result)
def parse_server_response(self,result):
if result['status'] == "ok":
print result['msg']
else:
logging.error(result)
print ("Error occured: %s" % (result['msg']))
sys.exit()
"""" Sends a new init configuration for deployment on a branch and current repo """
def init_config(self, config_file):
conf = {'conf':self.parse_yaml(config_file)}
if not conf['conf']:
print "Your config file could not be parsed"
sys.exit()
req = self.post_to_server("init.config", conf)
result = json.loads(req)
self.parse_server_response(result)
""" Creates the base url for the api """
def get_base_url(self, command = None):
return {
'info': 'http://%s' % (self.address),
'init.config': 'http://%s/api/%s/init/' % (self.address, self.repo),
'deploy': 'http://%s/api/%s/deploy/' % (self.address, self.repo),
}.get(command, 'http://%s/api/%s' % (self.address, self.repo))
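# Illustrative results (hypothetical address/repo): with self.address ==
# "deploy.example.com" and self.repo == "nursery",
#   get_base_url('deploy') -> "http://deploy.example.com/api/nursery/deploy/"
#   get_base_url('info')   -> "http://deploy.example.com"
#   get_base_url()         -> "http://deploy.example.com/api/nursery"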
""" Post requests to deploy server """
def post_to_server(self, command = None, data_dict = None):
if self.address is not None:
url_2 = self.get_base_url(command)
if data_dict is not None:
logging.debug("sending post data: %s to: %s" % (data_dict, url_2))
data = urllib.urlencode(data_dict)
req = urllib2.Request(url_2, data)
try:
rsp = urllib2.urlopen(req)
except urllib2.URLError, e:
logging.error("Error 2: couldn't communicate with the server on: %s" % (url_2))
sys.exit()
else:
req = urllib2.Request(url_2)
try:
logging.debug("executing get on: %s" % (url_2))
rsp = urllib2.urlopen(req)
except urllib2.URLError, e:
logging.error("Error 3: couldn't communicate with the server on: %s" % (url_2))
sys.exit()
return rsp.read()
else:
logging.error("Error 4: Can't comunicate with the server")
sys.exit()
class DeployAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
logging.debug('DeployAction %r %r %r' % (namespace, values, option_string))
setattr(namespace, self.dest, values)
if values is None:
server.deploy()
else:
server.deploy(values)
""" This will read a local config yaml which will be sent to the server
If the server will have this repo and branch already configured
an error will be trigered.
This method can't be used to overwrite config data """
class InitAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
logging.debug('%r %r %r' % (namespace, values, option_string))
setattr(namespace, self.dest, values)
server.init_config(values)
# TODO verify with the server whether a config has already been initialized for this repo;
# if one exists an error will be displayed
class SetupAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
logging.debug('%r %r %r' % (namespace, values, option_string))
setattr(namespace, self.dest, values)
server = values
# write hidden file with the server address
f = open(SERVER_FILE,'w')
f.write('%s' %(server)) # python will convert \n to os.linesep
f.close()
server = Server()
parser = argparse.ArgumentParser(description = 'Nursery deploy system')
parser.add_argument('-v','--version', action = 'version', version = '%(prog)s '+VERSION)
parser.add_argument('-s','--setup', nargs='?', metavar='Server', action = SetupAction, help = 'set up a nursery deploy system; specify the nursery server endpoint, e.g. http://www.my-nursery-server.com')
# each branch needs its own config file
parser.add_argument('-c','--config', metavar='config.yaml', action = InitAction, type = file, help = 'init a new repo deployment with the config file you specify')
parser.add_argument('-d','--deploy', nargs='?', metavar='hash', action = DeployAction, help = 'create a new async deploy (optionally at a specific git hash)')
parser.add_argument('-i','--info', action='store_true', help = 'some info Nursery Client knows about')
if not len(sys.argv) > 1:
parser.print_help()
else:
args = parser.parse_args()
logging.debug(args)
if args.info:
if server.address is not None:
print ("remote deploy server: %s" % server.address)
print ("repo: %s" % server.repo)
print ("branch: %s" % server.current_branch)
# communication with the server - done
# setup server (with amazon credentials & stuff)
# initialize branch deploy with deploy server
# read config yaml and send it to the server - file sent - ok
# read the response and show it - ok
# read the file on the server - ok
#TODO
# on the server store the git deploy command so it can be processed async
# 3 ways to deploy: git, client, forced
# - client
# client -> git deploy (last hash) -> ok
# store in db the command if allow_multiple_deploy & stuff
# parse the command async
# build file list
# get instances
# get scripts
# make the deployment
# on the server we need to model this yaml file into the db
# find a good way to insert instances in db
# filter a deployment based on touched files
# make a deployment
| creyer/nursery | nursery.py | Python | apache-2.0 | 7,857 |
from typing import Dict, List, Optional
from ray.tune.suggest.suggestion import Searcher, ConcurrencyLimiter
from ray.tune.suggest.search_generator import SearchGenerator
from ray.tune.trial import Trial
class _MockSearcher(Searcher):
def __init__(self, **kwargs):
self.live_trials = {}
self.counter = {"result": 0, "complete": 0}
self.final_results = []
self.stall = False
self.results = []
super(_MockSearcher, self).__init__(**kwargs)
def suggest(self, trial_id: str):
if not self.stall:
self.live_trials[trial_id] = 1
return {"test_variable": 2}
return None
def on_trial_result(self, trial_id: str, result: Dict):
self.counter["result"] += 1
self.results += [result]
def on_trial_complete(
self, trial_id: str, result: Optional[Dict] = None, error: bool = False
):
self.counter["complete"] += 1
if result:
self._process_result(result)
if trial_id in self.live_trials:
del self.live_trials[trial_id]
def _process_result(self, result: Dict):
self.final_results += [result]
class _MockSuggestionAlgorithm(SearchGenerator):
def __init__(self, max_concurrent: Optional[int] = None, **kwargs):
self.searcher = _MockSearcher(**kwargs)
if max_concurrent:
self.searcher = ConcurrencyLimiter(
self.searcher, max_concurrent=max_concurrent
)
super(_MockSuggestionAlgorithm, self).__init__(self.searcher)
@property
def live_trials(self) -> List[Trial]:
return self.searcher.live_trials
@property
def results(self) -> List[Dict]:
return self.searcher.results
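# Minimal usage sketch (the max_concurrent value is illustrative):
#
#   algo = _MockSuggestionAlgorithm(max_concurrent=2)
#   # algo.searcher is now a ConcurrencyLimiter wrapping a _MockSearcher,
#   # so at most two trials receive suggestions at a time.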
| ray-project/ray | python/ray/tune/suggest/_mock.py | Python | apache-2.0 | 1,752 |
# Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
import time
from oslo_messaging._drivers.protocols.amqp import controller
from oslo_messaging._i18n import _LW
from oslo_messaging import exceptions
from six import moves
LOG = logging.getLogger(__name__)
class SendTask(controller.Task):
"""A task that sends a message to a target, and optionally waits for a
reply message. The caller may block until the remote confirms receipt or
the reply message has arrived.
"""
def __init__(self, target, request, wait_for_reply, deadline):
super(SendTask, self).__init__()
self._target = target
self._request = request
self._deadline = deadline
self._wait_for_reply = wait_for_reply
self._results_queue = moves.queue.Queue()
def wait(self, timeout):
"""Wait for the send to complete, and, optionally, a reply message from
the remote. Will raise MessagingTimeout if the send does not complete
or no reply is received within timeout seconds. If the request has
failed for any other reason, a MessagingException is raised.
"""
try:
result = self._results_queue.get(timeout=timeout)
except moves.queue.Empty:
if self._wait_for_reply:
reason = "Timed out waiting for a reply."
else:
reason = "Timed out waiting for send to complete."
raise exceptions.MessagingTimeout(reason)
if result["status"] == "OK":
return result.get("response", None)
raise result["error"]
def execute(self, controller):
"""Runs on eventloop thread - sends request."""
if not self._deadline or self._deadline > time.time():
controller.request(self._target, self._request,
self._results_queue, self._wait_for_reply)
else:
LOG.warning(_LW("Send request to %s aborted: TTL expired."),
self._target)
class ListenTask(controller.Task):
"""A task that creates a subscription to the given target. Messages
arriving from the target are given to the listener.
"""
def __init__(self, target, listener, notifications=False):
"""Create a subscription to the target."""
super(ListenTask, self).__init__()
self._target = target
self._listener = listener
self._notifications = notifications
def execute(self, controller):
"""Run on the eventloop thread - subscribes to target. Inbound messages
are queued to the listener's incoming queue.
"""
if self._notifications:
controller.subscribe_notifications(self._target,
self._listener.incoming)
else:
controller.subscribe(self._target, self._listener.incoming)
class ReplyTask(controller.Task):
"""A task that sends 'response' message to 'address'.
"""
def __init__(self, address, response, log_failure):
super(ReplyTask, self).__init__()
self._address = address
self._response = response
self._log_failure = log_failure
self._wakeup = threading.Event()
def wait(self):
"""Wait for the controller to send the message.
"""
self._wakeup.wait()
def execute(self, controller):
"""Run on the eventloop thread - send the response message."""
controller.response(self._address, self._response)
self._wakeup.set()
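# Minimal usage sketch (controller/scheduling elided; the timeout values are
# illustrative): a send that gives up 30 seconds from now and waits for the
# reply.
#
#   task = SendTask(target, request, wait_for_reply=True,
#                   deadline=time.time() + 30)
#   # ... hand the task to the eventloop, which calls task.execute(ctrl) ...
#   response = task.wait(timeout=30)  # raises MessagingTimeout on expiry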
| dukhlov/oslo.messaging | oslo_messaging/_drivers/protocols/amqp/drivertasks.py | Python | apache-2.0 | 4,126 |
import sys
sys.path.insert(1, "../../../")
import h2o
def binop_plus(ip,port):
# Connect to h2o
h2o.init(ip,port)
iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader_65_rows.csv"))
rows, cols = iris.dim()
iris.show()
###################################################################
# LHS: scalar, RHS: H2OFrame
res = 2 + iris
res_rows, res_cols = res.dim()
assert res_rows == rows and res_cols == cols, "dimension mismatch"
for x, y in zip([res[c].sum() for c in range(cols-1)], [469.9, 342.6, 266.9, 162.2]):
assert abs(x - y) < 1e-1, "expected same values"
# LHS: scalar, RHS: scalar
res = 2 + iris[0]
res2 = 1.1 + res[21,:]
assert abs(res2 - 8.2) < 1e-1, "expected same values"
###################################################################
# LHS: scalar, RHS: H2OFrame
res = 1.2 + iris[2]
res2 = res[21,:] + iris
res2.show()
# LHS: scalar, RHS: H2OVec
res = 1.2 + iris[2]
res2 = res[21,:] + iris[1]
res2.show()
# LHS: scalar, RHS: scalar
res = 1.1 + iris[2]
res2 = res[21,:] + res[10,:]
assert abs(res2 - 5.2) < 1e-1, "expected same values"
# LHS: scalar, RHS: scalar
res = 2 + iris[0]
res2 = res[21,:] + 3
assert abs(res2 - 10.1) < 1e-1, "expected same values"
###################################################################
# LHS: H2OVec, RHS: H2OFrame
#try:
# res = iris[2] + iris
# res.show()
# assert False, "expected error. objects with different dimensions not supported."
#except EnvironmentError:
# pass
# LHS: H2OVec, RHS: scalar
res = 1.2 + iris[2]
res2 = iris[1] + res[21,:]
res2.show()
###################################################################
# LHS: H2OFrame, RHS: H2OFrame
res = iris + iris
res_rows, res_cols = res.dim()
assert res_rows == rows and res_cols == cols, "dimension mismatch"
res = iris[0:2] + iris[1:3]
res_rows, res_cols = res.dim()
assert res_rows == rows and res_cols == 2, "dimension mismatch"
#try:
# res = iris + iris[0:3]
# res.show()
# assert False, "expected error. frames are different dimensions."
#except EnvironmentError:
# pass
# LHS: H2OFrame, RHS: H2OVec
#try:
# res = iris + iris[0]
# res.show()
# assert False, "expected error. objects of different dimensions not supported."
#except EnvironmentError:
# pass
# LHS: H2OFrame, RHS: scalar
res = 1.2 + iris[2]
res2 = iris + res[21,:]
res2.show()
# LHS: H2OFrame, RHS: scalar
res = iris + 2
res_rows, res_cols = res.dim()
assert res_rows == rows and res_cols == cols, "dimension mismatch"
for x, y in zip([res[c].sum() for c in range(cols-1)], [469.9, 342.6, 266.9, 162.2]):
assert abs(x - y) < 1e-1, "expected same values"
###################################################################
if __name__ == "__main__":
h2o.run_test(sys.argv, binop_plus)
| ChristosChristofidis/h2o-3 | h2o-py/tests/testdir_munging/binop/pyunit_binop2_plus.py | Python | apache-2.0 | 3,072 |
#!/usr/bin/python
from __future__ import print_function
from guild.actor import Actor, actor_method, process_method, late_bind
class Dog(Actor):
@actor_method # Input - triggered by data coming in
def woof(self):
print("Woof", self)
@process_method # Process - triggered each time it's run
def process(self):
#print(" ", end="")
pass
@late_bind # Output
def produce(self):
pass
class Shitzu(Dog):
def __init__(self):
self.count = 0
super(Shitzu, self).__init__()
@process_method
def process(self):
self.count += 1
print("I don't go meow", self.count)
if self.count >= 20:
self.stop()
return False
if __name__ == "__main__":
import time
dog = Dog()
shitzu = Shitzu()
dog.start()
shitzu.start()
dog.woof()
shitzu.woof()
time.sleep(0.1)
shitzu.join()
time.sleep(0.1)
dog.stop()
dog.join()
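# Approximate expected output (actor scheduling makes the exact interleaving
# nondeterministic): one "Woof <Dog ...>" and one "Woof <Shitzu ...>" line
# from the woof() calls, plus "I don't go meow 1" .. "I don't go meow 20"
# before the Shitzu stops itself.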
| sparkslabs/guild | examples/dogs_go_woof_actors.py | Python | apache-2.0 | 988 |
import os
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch, Mock
import re
import rdflib
from rdflib import RDF
from urllib import urlencode, unquote
from eulxml.xmlmap import load_xmlobject_from_file, XmlObject
from eulfedora.server import Repository
from piffle import iiif
from readux.annotations.models import Annotation
from readux.books import abbyyocr
from readux.books.models import SolrVolume, Volume, VolumeV1_0, Book, BIBO, \
DC, Page, PageV1_1
FIXTURE_DIR = os.path.join(settings.BASE_DIR, 'readux', 'books', 'fixtures')
class SolrVolumeTest(TestCase):
# primarily testing BaseVolume logic here
def test_properties(self):
ocm = 'ocn460678076'
vol = 'V.1'
noid = '1234'
volume = SolrVolume(label='%s_%s' % (ocm, vol),
pid='testpid:%s' % noid)
self.assertEqual(ocm, volume.control_key)
self.assertEqual(vol, volume.volume)
self.assertEqual(noid, volume.noid)
# don't display volume zero
vol = 'V.0'
volume.data['label'] = '%s_%s' % (ocm, vol)
self.assertEqual('', volume.volume)
# should also work without volume info
volume.data['label'] = ocm
self.assertEqual(ocm, volume.control_key)
self.assertEqual('', volume.volume)
def test_fulltext_absolute_url(self):
volume = SolrVolume(label='ocn460678076_V.1',
pid='testpid:1234')
url = volume.fulltext_absolute_url()
self.assert_(url.startswith('https://'))
self.assert_(url.endswith(reverse('books:text', kwargs={'pid': volume.pid})))
current_site = Site.objects.get_current()
self.assert_(current_site.domain in url)
def test_voyant_url(self):
# Volume with English Lang
volume1 = SolrVolume(label='ocn460678076_V.1',
pid='testpid:1234', language='eng')
url = volume1.voyant_url()
self.assert_(urlencode({'corpus': volume1.pid}) in url,
'voyant url should include volume pid as corpus identifier')
self.assert_(urlencode({'archive': volume1.fulltext_absolute_url()}) in url,
'voyant url should include volume fulltext url as archive')
self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) in url,
'voyant url should include english stopword list when volume is in english')
# volume language is French
volume2 = SolrVolume(label='ocn460678076_V.1',
pid='testpid:1235', language='fra')
url_fra = volume2.voyant_url()
self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) not in url_fra,
'voyant url should not include english stopword list when language is not english')
def test_pdf_url(self):
# no start page set
vol = SolrVolume(pid='vol:123')
pdf_url = vol.pdf_url()
self.assertEqual(unquote(reverse('books:pdf', kwargs={'pid': vol.pid})), pdf_url)
# start page
vol = SolrVolume(pid='vol:123', start_page=6)
pdf_url = vol.pdf_url()
self.assert_(pdf_url.startswith(unquote(reverse('books:pdf', kwargs={'pid': vol.pid}))))
self.assert_('#page=6' in pdf_url)
class VolumeTest(TestCase):
# borrowing fixture & test accounts from readux.annotations.tests
fixtures = ['test_annotation_data.json']
user_credentials = {
'user': {'username': 'testuser', 'password': 'testing'},
'superuser': {'username': 'testsuper', 'password': 'superme'}
}
def test_annotations(self):
# find annotations associated with a volume, optionally filtered
# by user
User = get_user_model()
testuser = User.objects.create(username='tester')
testadmin = User.objects.create(username='super', is_superuser=True)
mockapi = Mock()
vol = Volume(mockapi, 'vol:1')
# create annotations to test finding
p1 = Annotation.objects.create(user=testuser, text='testuser p1',
uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:1'}),
volume_uri=vol.absolute_url)
p2 = Annotation.objects.create(user=testuser, text='testuser p2',
uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:2'}),
volume_uri=vol.absolute_url)
p3 = Annotation.objects.create(user=testuser, text='testuser p3',
uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:3'}),
volume_uri=vol.absolute_url)
v2p1 = Annotation.objects.create(user=testuser, text='testuser vol2 p1',
uri=reverse('books:page', kwargs={'vol_pid': 'vol:2', 'pid': 'p:1'}),
volume_uri='http://example.com/books/vol:2/')
sup2 = Annotation.objects.create(user=testadmin, text='testsuper p2',
uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:2'}),
volume_uri=vol.absolute_url)
annotations = vol.annotations()
self.assertEqual(4, annotations.count())
self.assert_(v2p1 not in annotations)
# filter by user
annotations = vol.annotations().visible_to(testuser)
self.assertEqual(3, annotations.count())
self.assert_(sup2 not in annotations)
annotations = vol.annotations().visible_to(testadmin)
self.assertEqual(4, annotations.count())
self.assert_(sup2 in annotations)
# annotation counts per page
annotation_count = vol.page_annotation_count()
self.assertEqual(1, annotation_count[p1.uri])
self.assertEqual(2, annotation_count[p2.uri])
self.assertEqual(1, annotation_count[p3.uri])
# by user
annotation_count = vol.page_annotation_count(testuser)
self.assertEqual(1, annotation_count[p2.uri])
annotation_count = vol.page_annotation_count(testadmin)
self.assertEqual(2, annotation_count[p2.uri])
# total for a volume
self.assertEqual(4, vol.annotation_count())
self.assertEqual(3, vol.annotation_count(testuser))
self.assertEqual(4, vol.annotation_count(testadmin))
# total for all volumes
totals = Volume.volume_annotation_count()
self.assertEqual(1, totals['http://example.com/books/vol:2/'])
self.assertEqual(4, totals[vol.absolute_url])
totals = Volume.volume_annotation_count(testuser)
self.assertEqual(3, totals[vol.absolute_url])
def test_has_pages(self):
mockapi = Mock()
vol = Volume(mockapi, 'vol:1')
vol.pages = []
self.assertFalse(vol.has_pages)
# one page (i.e. cover image) is not enough to count as having pages
vol.pages = [Mock(spec=Page)]
self.assertFalse(vol.has_pages)
vol.pages = [Mock(spec=Page), Mock(spec=Page)]
self.assertTrue(vol.has_pages)
def test_has_tei(self):
mockapi = Mock()
vol = Volume(mockapi, 'vol:1')
p1 = Mock(spec=Page)
p1.tei.exists = False
p2 = Mock(spec=Page)
p2.tei.exists = False
vol.pages = [p1, p2]
self.assertFalse(vol.has_tei)
p2.tei.exists = True
self.assertTrue(vol.has_tei)
class VolumeV1_0Test(TestCase):
def setUp(self):
# use uningested objects for testing purposes
repo = Repository()
self.vol = repo.get_object(type=VolumeV1_0)
self.vol.label = 'ocn460678076_V.1'
self.vol.pid = 'rdxtest:4606'
def test_ark_uri(self):
ark_uri = 'http://pid.co/ark:/12345/ba45'
self.vol.dc.content.identifier_list.extend([ark_uri, 'pid:ba45', 'otherid'])
self.assertEqual(ark_uri, self.vol.ark_uri)
def test_rdf_dc(self):
# add metadata to test rdf generated
ark_uri = 'http://pid.co/ark:/12345/ba45'
self.vol.dc.content.identifier_list.append(ark_uri)
self.vol.dc.content.title = 'Sunset, a novel'
self.vol.dc.content.format = 'application/pdf'
self.vol.dc.content.language = 'eng'
self.vol.dc.content.rights = 'public domain'
# NOTE: patching on class instead of instance because related object is a descriptor
with patch.object(Volume, 'book', new=Mock(spec=Book)) as mockbook:
mockbook.dc.content.creator_list = ['Author, Joe']
mockbook.dc.content.date_list = ['1801', '2010']
mockbook.dc.content.description_list = ['digitized edition', 'mystery novel']
mockbook.dc.content.publisher = 'Nashville, Tenn. : Barbee & Smith'
mockbook.dc.content.relation_list = [
'http://pid.co/ark:/12345/book',
'http://pid.co/ark:/12345/volpdf'
]
graph = self.vol.rdf_dc_graph()
lit = rdflib.Literal
uri = rdflib.URIRef(self.vol.ark_uri)
self.assert_((uri, RDF.type, BIBO.book) in graph,
'rdf graph type should be bibo:book')
self.assert_((uri, DC.title, lit(self.vol.dc.content.title)) in graph,
'title should be set as dc:title')
self.assert_((uri, BIBO.volume, lit(self.vol.volume)) in graph,
'volume label should be set as bibo:volume')
self.assert_((uri, DC['format'], lit(self.vol.dc.content.format)) in graph,
'format should be set as dc:format')
self.assert_((uri, DC.language, lit(self.vol.dc.content.language)) in graph,
'language should be set as dc:language')
self.assert_((uri, DC.rights, lit(self.vol.dc.content.rights)) in graph,
'rights should be set as dc:rights')
for rel in self.vol.dc.content.relation_list:
self.assert_((uri, DC.relation, lit(rel)) in graph,
'related item %s should be set as dc:relation' % rel)
# metadata pulled from book obj because not present in volume
self.assert_((uri, DC.creator, lit(mockbook.dc.content.creator_list[0])) in graph,
'creator from book metadata should be set as dc:creator when not present in volume metadata')
self.assert_((uri, DC.publisher, lit(mockbook.dc.content.publisher)) in graph,
'publisher from book metadata should be set as dc:publisher when not present in volume metadata')
# earliest date only
self.assert_((uri, DC.date, lit('1801')) in graph,
'earliest date 1801 from book metadata should be set as dc:date when not present in volume metadata')
for d in mockbook.dc.content.description_list:
self.assert_((uri, DC.description, lit(d)) in graph,
'description from book metadata should be set as dc:description when not present in volume metadata')
# volume-level metadata should be used when present instead of book
self.vol.dc.content.creator_list = ['Writer, Jane']
self.vol.dc.content.date_list = ['1832', '2012']
self.vol.dc.content.description_list = ['digital edition']
self.vol.dc.content.publisher = 'So & So Publishers'
graph = self.vol.rdf_dc_graph()
self.assert_((uri, DC.creator, lit(self.vol.dc.content.creator_list[0])) in graph,
'creator from volume metadata should be set as dc:creator when present')
self.assert_((uri, DC.publisher, lit(self.vol.dc.content.publisher)) in graph,
'publisher from volume metadata should be set as dc:publisher when present')
# earliest date *only* should be present
self.assert_((uri, DC.date, lit('1832')) in graph,
'earliest date 1832 from volume metadata should be set as dc:date when present')
for d in self.vol.dc.content.description_list:
self.assert_((uri, DC.description, lit(d)) in graph,
'description from volume metadata should be set as dc:description when present')
def test_index_data(self):
self.vol.owner = ''
self.vol.dc.content.date = 1842
# NOTE: patching on class instead of instance because related object is a descriptor
with patch.object(Volume, 'book', new=Mock(spec=Book)) as mockbook:
mockbook.pid = 'book:123'
mockbook.collection.pid = 'coll:123',
mockbook.collection.short_label = 'Pile O\' Books'
mockbook.dc.content.creator_list = ['Author, Joe']
mockbook.dc.content.date_list = ['1801', '2010']
mockbook.dc.content.description_list = ['digitized edition', 'mystery novel']
mockbook.dc.content.publisher = 'Nashville, Tenn. : Barbee & Smith'
mockbook.dc.content.relation_list = [
'http://pid.co/ark:/12345/book',
'http://pid.co/ark:/12345/volpdf'
]
mockbook.dc.content.subject_list = []
data = self.vol.index_data()
self.assert_('fulltext' not in data,
'fulltext should not be set in index data when volume has no ocr')
self.assert_('hasPrimaryImage' not in data,
'hasPrimaryImage should not be set in index data when volume has no cover')
self.assertEqual(mockbook.pid, data['book_id'],
'associated book pid should be set as book id')
self.assertEqual(mockbook.collection.pid, data['collection_id'],
'associated collection pid should be set as collection id')
self.assertEqual(mockbook.collection.short_label, data['collection_label'],
'associated collection label short label should be set as collection label')
self.assertEqual(mockbook.dc.content.creator_list, data['creator'],
'creator should be set from book DC creator')
self.assertEqual(self.vol.dc.content.date_list, data['date'],
'date should be set from earliest volume DC date')
self.assert_('subject' not in data,
'subject should not be set in index data when book has no subjects')
self.assertEqual(0, data['page_count'],
'page count should be set to zero when volume has no pages loaded')
# test hasPrimaryImage
mockpage = Mock(spec=Page)
mockpage.pid = 'page:1234'
mockpage.uriref = rdflib.URIRef('info:fedora/%s' % mockpage.pid)
self.vol.primary_image = mockpage
data = self.vol.index_data()
self.assertEqual(mockpage.pid, data['hasPrimaryImage'],
'hasPrimaryImage should be set to cover page pid, when present')
# test subjects
mockbook.dc.content.subject_list = ['subj1', 'subj2']
data = self.vol.index_data()
self.assertEqual(mockbook.dc.content.subject_list, data['subject'],
'subject should be set when present in book DC')
# test full-text
with patch.object(self.vol, 'ocr') as mockocr:
mockocr.exists = True
ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR,
'abbyyocr_fr8v2.xml'))
mockocr.content = ocr_xml
data = self.vol.index_data()
self.assert_('fulltext' in data,
'fulltext should be set in index data when OCR is available')
# use mock to test pdf size indexing
with patch.object(self.vol, 'pdf') as mockpdf:
mockpdf.size = 1234567
data = self.vol.index_data()
self.assertEqual(mockpdf.size, data['pdf_size'],
'pdf_size should be set from pdf size, when available')
def test_voyant_url(self):
# NOTE: this test is semi-redundant with the same test for the SolrVolume,
# but since the method is implemented in BaseVolume and depends on
# properties set on the subclasses, testing here to ensure it works
# in both cases
# no language
self.vol.pid = 'vol:1234'
url = self.vol.voyant_url()
self.assert_(urlencode({'corpus': self.vol.pid}) in url,
'voyant url should include volume pid as corpus identifier')
self.assert_(urlencode({'archive': self.vol.fulltext_absolute_url()}) in url,
'voyant url should include volume fulltext url as archive')
self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) not in url,
'voyant url should not include english stopword list when volume is not in english')
# english
self.vol.dc.content.language = 'eng'
url = self.vol.voyant_url()
self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) in url,
'voyant url should include english stopword list when volume is in english')
def test_get_fulltext(self):
with patch.object(self.vol, 'ocr') as mockocr:
mockocr.exists = True
# abbyy finereader v8
ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR,
'abbyyocr_fr8v2.xml'))
mockocr.content = ocr_xml
text = self.vol.get_fulltext()
# check for arbitrary text content
self.assert_('In presenting this, the initial volume of the' in text,
'ocr text content should be present in plain text')
self.assert_('Now, kind reader, we ask that you do not crit' in text,
'ocr text content should be present in plain text')
self.assert_(re.search(r'Baldwin\s+Dellinger\s+Brice', text),
'table row content should be displayed on a single line')
# abbyy finereader v6
ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR,
'abbyyocr_fr6v1.xml'))
mockocr.content = ocr_xml
text = self.vol.get_fulltext()
# check for arbitrary text content
self.assert_('was late in the autumn, the vines yet kept their leaves,' in text,
'ocr text content should be present in plain text')
self.assert_('walked up the steps. The lady had not moved, and made' in text,
'ocr text content should be present in plain text')
self.assert_(re.search(r'Modern\.\s+New Standard\.\s+Popular\.', text),
'table row content should be displayed on a single line')
def test_ocr_ids(self):
# patch in fixture OCR content
with patch.object(self.vol, 'ocr') as mockocr:
mockocr.exists = True
ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR,
'abbyyocr_fr8v2.xml'))
mockocr.content = ocr_xml
self.assertFalse(self.vol.ocr_has_ids)
self.vol.add_ocr_ids()
self.assertTrue(self.vol.ocr_has_ids)
class PageV1_1Test(TestCase):
metsalto_doc = os.path.join(FIXTURE_DIR, 'mets_alto.xml')
def setUp(self):
self.mets_alto = load_xmlobject_from_file(self.metsalto_doc, XmlObject)
def test_ocr_ids(self):
page = PageV1_1(Mock()) # use mock for fedora api, since we won't make any calls
page.pid = 'rdxtest:4607'
with patch.object(page, 'ocr') as mockocr:
mockocr.exists = True
mockocr.content = self.mets_alto
self.assertFalse(page.ocr_has_ids)
page.add_ocr_ids()
self.assertTrue(page.ocr_has_ids)
class AbbyyOCRTestCase(TestCase):
fr6v1_doc = os.path.join(FIXTURE_DIR, 'abbyyocr_fr6v1.xml')
fr8v2_doc = os.path.join(FIXTURE_DIR, 'abbyyocr_fr8v2.xml')
# language code
eng = 'EnglishUnitedStates'
def setUp(self):
self.fr6v1 = load_xmlobject_from_file(self.fr6v1_doc, abbyyocr.Document)
self.fr8v2 = load_xmlobject_from_file(self.fr8v2_doc, abbyyocr.Document)
def test_document(self):
# top-level document properties
# finereader 6 v1
self.assertEqual(132, self.fr6v1.page_count)
self.assertEqual(self.eng, self.fr6v1.language)
self.assertEqual(self.eng, self.fr6v1.languages)
self.assert_(self.fr6v1.pages, 'page list should be non-empty')
self.assertEqual(132, len(self.fr6v1.pages),
'number of pages should match page count')
self.assert_(isinstance(self.fr6v1.pages[0], abbyyocr.Page))
# finereader 8 v2
self.assertEqual(186, self.fr8v2.page_count)
self.assertEqual(self.eng, self.fr8v2.language)
self.assertEqual(self.eng, self.fr8v2.languages)
self.assert_(self.fr8v2.pages, 'page list should be non-empty')
self.assertEqual(186, len(self.fr8v2.pages),
'number of pages should match page count')
self.assert_(isinstance(self.fr8v2.pages[0], abbyyocr.Page))
def test_page(self):
# finereader 6 v1
self.assertEqual(1500, self.fr6v1.pages[0].width)
self.assertEqual(2174, self.fr6v1.pages[0].height)
self.assertEqual(300, self.fr6v1.pages[0].resolution)
# second page has picture block, no text
self.assertEqual(1, len(self.fr6v1.pages[1].blocks))
self.assertEqual(1, len(self.fr6v1.pages[1].picture_blocks))
self.assertEqual(0, len(self.fr6v1.pages[1].text_blocks))
self.assert_(isinstance(self.fr6v1.pages[1].blocks[0], abbyyocr.Block))
# fourth page has paragraph text
self.assert_(self.fr6v1.pages[3].paragraphs)
self.assert_(isinstance(self.fr6v1.pages[3].paragraphs[0],
abbyyocr.Paragraph))
# finereader 8 v2
self.assertEqual(2182, self.fr8v2.pages[0].width)
self.assertEqual(3093, self.fr8v2.pages[0].height)
self.assertEqual(300, self.fr8v2.pages[0].resolution)
# first page has multiple text/pic blocks
self.assert_(self.fr8v2.pages[0].blocks)
self.assert_(self.fr8v2.pages[0].picture_blocks)
self.assert_(self.fr8v2.pages[0].text_blocks)
self.assert_(isinstance(self.fr8v2.pages[0].blocks[0], abbyyocr.Block))
# first page has paragraph text
self.assert_(self.fr8v2.pages[0].paragraphs)
self.assert_(isinstance(self.fr8v2.pages[0].paragraphs[0],
abbyyocr.Paragraph))
def test_block(self):
# finereader 6 v1
# - basic block attributes
b = self.fr6v1.pages[1].blocks[0]
self.assertEqual('Picture', b.type)
self.assertEqual(144, b.left)
self.assertEqual(62, b.top)
self.assertEqual(1358, b.right)
self.assertEqual(2114, b.bottom)
# - block with text
b = self.fr6v1.pages[3].blocks[0]
self.assert_(b.paragraphs)
self.assert_(isinstance(b.paragraphs[0], abbyyocr.Paragraph))
# finereader 8 v2
b = self.fr8v2.pages[0].blocks[0]
self.assertEqual('Text', b.type)
self.assertEqual(282, b.left)
self.assertEqual(156, b.top)
self.assertEqual(384, b.right)
self.assertEqual(228, b.bottom)
self.assert_(b.paragraphs)
self.assert_(isinstance(b.paragraphs[0], abbyyocr.Paragraph))
def test_paragraph_line(self):
# finereader 6 v1
para = self.fr6v1.pages[3].paragraphs[0]
# untested: align, left/right/start indent
self.assert_(para.lines)
self.assert_(isinstance(para.lines[0], abbyyocr.Line))
line = para.lines[0]
self.assertEqual(283, line.baseline)
self.assertEqual(262, line.left)
self.assertEqual(220, line.top)
self.assertEqual(1220, line.right)
self.assertEqual(294, line.bottom)
# line text available via unicode
self.assertEqual(u'MABEL MEREDITH;', unicode(line))
# also mapped as formatted text (could repeat/segment)
self.assert_(line.formatted_text) # should be non-empty
self.assert_(isinstance(line.formatted_text[0], abbyyocr.Formatting))
self.assertEqual(self.eng, line.formatted_text[0].language)
self.assertEqual(u'MABEL MEREDITH;', line.formatted_text[0].text) # not normalized
# finereader 8 v2
para = self.fr8v2.pages[1].paragraphs[0]
self.assert_(para.lines)
self.assert_(isinstance(para.lines[0], abbyyocr.Line))
line = para.lines[0]
self.assertEqual(1211, line.baseline)
self.assertEqual(845, line.left)
self.assertEqual(1160, line.top)
self.assertEqual(1382, line.right)
self.assertEqual(1213, line.bottom)
self.assertEqual(u'EMORY UNIVERSITY', unicode(line))
self.assert_(line.formatted_text) # should be non-empty
self.assert_(isinstance(line.formatted_text[0], abbyyocr.Formatting))
self.assertEqual(self.eng, line.formatted_text[0].language)
self.assertEqual(u'EMORY UNIVERSITY', line.formatted_text[0].text)
def test_frns(self):
self.assertEqual('fr6v1:par|fr8v2:par', abbyyocr.frns('par'))
self.assertEqual('fr6v1:text/fr6v1:par|fr8v2:text/fr8v2:par',
abbyyocr.frns('text/par'))
| emory-libraries/readux | readux/books/tests/models.py | Python | apache-2.0 | 25,398 |
# Copyright (c) 2019 Verizon Media
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ALIAS = 'tag-ports-during-bulk-creation'
IS_SHIM_EXTENSION = True
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'Tag Ports During Bulk Creation'
DESCRIPTION = 'Allow to tag ports during bulk creation'
UPDATED_TIMESTAMP = '2019-12-29T19:00:00-00:00'
RESOURCE_ATTRIBUTE_MAP = {}
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
REQUIRED_EXTENSIONS = []
OPTIONAL_EXTENSIONS = []
ACTION_STATUS = {}
| openstack/neutron-lib | neutron_lib/api/definitions/tag_ports_during_bulk_creation.py | Python | apache-2.0 | 1,024 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from textwrap import dedent
from pants.backend.jvm.register import build_file_aliases as register_jvm
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jvm_binary import (Duplicate, JarRules, JvmBinary, ManifestEntries,
Skip)
from pants.base.address import BuildFileAddress
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload_field import FingerprintedField
from pants.base.target import Target
from pants_test.base_test import BaseTest
class JarRulesTest(unittest.TestCase):
def test_jar_rule(self):
dup_rule = Duplicate('foo', Duplicate.REPLACE)
self.assertEquals('Duplicate(apply_pattern=foo, action=REPLACE)',
repr(dup_rule))
skip_rule = Skip('foo')
self.assertEquals('Skip(apply_pattern=foo)', repr(skip_rule))
def test_invalid_apply_pattern(self):
with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern is not a string'):
Skip(None)
with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern is not a string'):
Duplicate(None, Duplicate.SKIP)
with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern: \) is not a valid'):
Skip(r')')
with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern: \) is not a valid'):
Duplicate(r')', Duplicate.SKIP)
def test_bad_action(self):
with self.assertRaisesRegexp(ValueError, r'The supplied action must be one of'):
Duplicate('foo', None)
def test_duplicate_error(self):
with self.assertRaisesRegexp(Duplicate.Error, r'Duplicate entry encountered for path foo'):
raise Duplicate.Error('foo')
def test_default(self):
jar_rules = JarRules.default()
self.assertEquals(4, len(jar_rules.rules))
for rule in jar_rules.rules:
self.assertTrue(rule.apply_pattern.pattern.startswith(r'^META-INF'))
def test_set_bad_default(self):
with self.assertRaisesRegexp(ValueError, r'The default rules must be a JarRules'):
JarRules.set_default(None)
class JvmBinaryTest(BaseTest):
@property
def alias_groups(self):
return register_jvm()
def test_simple(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
basename='foo-base',
)
'''))
target = self.target('//:foo')
self.assertEquals('com.example.Foo', target.main)
self.assertEquals('com.example.Foo', target.payload.main)
self.assertEquals('foo-base', target.basename)
self.assertEquals('foo-base', target.payload.basename)
self.assertEquals([], target.deploy_excludes)
self.assertEquals([], target.payload.deploy_excludes)
self.assertEquals(JarRules.default(), target.deploy_jar_rules)
self.assertEquals(JarRules.default(), target.payload.deploy_jar_rules)
self.assertEquals({}, target.payload.manifest_entries.entries)
def test_default_base(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
)
'''))
target = self.target('//:foo')
self.assertEquals('foo', target.basename)
def test_deploy_jar_excludes(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
deploy_excludes=[exclude(org='example.com', name='foo-lib')],
)
'''))
target = self.target('//:foo')
self.assertEquals([Exclude(org='example.com', name='foo-lib')],
target.deploy_excludes)
def test_deploy_jar_rules(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
deploy_jar_rules=jar_rules([Duplicate('foo', Duplicate.SKIP)],
default_dup_action=Duplicate.FAIL)
)
'''))
target = self.target('//:foo')
jar_rules = target.deploy_jar_rules
self.assertEquals(1, len(jar_rules.rules))
self.assertEquals('foo', jar_rules.rules[0].apply_pattern.pattern)
self.assertEquals(repr(Duplicate.SKIP),
repr(jar_rules.rules[0].action)) # <object object at 0x...>
self.assertEquals(Duplicate.FAIL, jar_rules.default_dup_action)
def test_bad_source_declaration(self):
build_file = self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
source=['foo.py'],
)
'''))
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmBinary.*foo.*source must be a single'):
self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'foo'))
def test_bad_sources_declaration(self):
with self.assertRaisesRegexp(Target.IllegalArgument,
r'jvm_binary only supports a single "source" argument'):
self.make_target('foo:foo', target_type=JvmBinary, main='com.example.Foo', sources=['foo.py'])
def test_bad_main_declaration(self):
build_file = self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='bar',
main=['com.example.Bar'],
)
'''))
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmBinary.*bar.*main must be a fully'):
self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'bar'))
def test_bad_jar_rules(self):
build_file = self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
deploy_jar_rules='invalid',
)
'''))
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmBinary.*foo.*'
r'deploy_jar_rules must be a JarRules specification. got str'):
self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'foo'))
def _assert_fingerprints_not_equal(self, fields):
for field in fields:
for other_field in fields:
if field == other_field:
continue
self.assertNotEquals(field.fingerprint(), other_field.fingerprint())
def test_jar_rules_field(self):
field1 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)]))
field1_same = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)]))
field2 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.CONCAT)]))
field3 = FingerprintedField(JarRules(rules=[Duplicate('bar', Duplicate.SKIP)]))
field4 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP),
Duplicate('bar', Duplicate.SKIP)]))
field5 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP), Skip('foo')]))
field6 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)],
default_dup_action=Duplicate.FAIL))
field6_same = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)],
default_dup_action=Duplicate.FAIL))
field7 = FingerprintedField(JarRules(rules=[Skip('foo')]))
field8 = FingerprintedField(JarRules(rules=[Skip('bar')]))
field8_same = FingerprintedField(JarRules(rules=[Skip('bar')]))
self.assertEquals(field1.fingerprint(), field1_same.fingerprint())
self.assertEquals(field6.fingerprint(), field6_same.fingerprint())
self.assertEquals(field8.fingerprint(), field8_same.fingerprint())
self._assert_fingerprints_not_equal([field1, field2, field3, field4, field5, field6, field7])
def test_manifest_entries(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
manifest_entries= {
'Foo-Field' : 'foo',
}
)
'''))
target = self.target('//:foo')
self.assertTrue(isinstance(target.payload.manifest_entries, ManifestEntries))
entries = target.payload.manifest_entries.entries
self.assertEquals({ 'Foo-Field' : 'foo'}, entries)
def test_manifest_not_dict(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
manifest_entries= 'foo',
)
'''))
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmBinary\(BuildFileAddress\(.*BUILD\), foo\)\): '
r'manifest_entries must be a dict. got str'):
self.target('//:foo')
def test_manifest_bad_key(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
manifest_entries= {
jar(org='bad', name='bad', rev='bad') : 'foo',
}
)
'''))
with self.assertRaisesRegexp(ManifestEntries.ExpectedDictionaryError,
r'entries must be dictionary of strings, got key bad-bad-bad type JarDependency'):
self.target('//:foo')
def test_manifest_entries_fingerprint(self):
field1 = ManifestEntries()
field2 = ManifestEntries({'Foo-Field' : 'foo'})
field2_same = ManifestEntries({'Foo-Field' : 'foo'})
field3 = ManifestEntries({'Foo-Field' : 'foo', 'Bar-Field' : 'bar'})
self.assertEquals(field2.fingerprint(), field2_same.fingerprint())
self._assert_fingerprints_not_equal([field1, field2, field3])
| digwanderlust/pants | tests/python/pants_test/backend/jvm/targets/test_jvm_binary.py | Python | apache-2.0 | 9,783 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Watch a running build job and output changes to the screen.
"""
import fcntl
import os
import select
import socket
import sys
import tempfile
import termios
import time
import traceback
from rmake import errors
from rmake.build import buildjob, buildtrove
from rmake.cmdline import query
def _getUri(client):
if not isinstance(client.uri, str) or client.uri.startswith('unix://'):
fd, tmpPath = tempfile.mkstemp()
os.close(fd)
uri = 'unix://' + tmpPath
else:
host = socket.gethostname()
uri = 'http://%s' % host
tmpPath = None
return uri, tmpPath
def monitorJob(client, jobId, showTroveDetails=False, showBuildLogs=False,
exitOnFinish=None, uri=None, serve=True, out=None,
displayClass=None):
if not uri:
uri, tmpPath = _getUri(client)
else:
tmpPath = None
if not displayClass:
displayClass = JobLogDisplay
try:
display = displayClass(client, showBuildLogs=showBuildLogs, out=out,
exitOnFinish=exitOnFinish)
client = client.listenToEvents(uri, jobId, display,
showTroveDetails=showTroveDetails,
serve=serve)
return client
finally:
if serve and tmpPath:
os.remove(tmpPath)
def waitForJob(client, jobId, uri=None, serve=True):
if not uri:
uri, tmpPath = _getUri(client)
else:
tmpPath = None
try:
display = SilentDisplay(client)
display._primeOutput(jobId)
return client.listenToEvents(uri, jobId, display, serve=serve)
finally:
if tmpPath:
os.remove(tmpPath)
class _AbstractDisplay(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
def __init__(self, client, showBuildLogs=True, out=None,
exitOnFinish=True):
self.client = client
self.finished = False
self.exitOnFinish = True # override exitOnFinish setting
self.showBuildLogs = showBuildLogs
if not out:
out = sys.stdout
self.out = out
def close(self):
pass
def _serveLoopHook(self):
pass
def _msg(self, msg, *args):
self.out.write('[%s] %s\n' % (time.strftime('%X'), msg))
self.out.flush()
def _jobStateUpdated(self, jobId, state, status):
isFinished = (state in (buildjob.JOB_STATE_FAILED,
buildjob.JOB_STATE_BUILT))
if isFinished:
self._setFinished()
def _setFinished(self):
self.finished = True
def _isFinished(self):
return self.finished
def _shouldExit(self):
return self._isFinished() and self.exitOnFinish
def _primeOutput(self, jobId):
job = self.client.getJob(jobId, withTroves=False)
if job.isFinished():
self._setFinished()
class SilentDisplay(_AbstractDisplay):
pass
class JobLogDisplay(_AbstractDisplay):
def __init__(self, client, showBuildLogs=True, out=None,
exitOnFinish=None):
_AbstractDisplay.__init__(self, client, out=out,
showBuildLogs=showBuildLogs,
exitOnFinish=exitOnFinish)
self.buildingTroves = {}
def _tailBuildLog(self, jobId, troveTuple):
mark = self.buildingTroves.get((jobId, troveTuple), [0])[0]
self.buildingTroves[jobId, troveTuple] = [mark, True]
self.out.write('Tailing %s build log:\n\n' % troveTuple[0])
def _stopTailing(self, jobId, troveTuple):
mark = self.buildingTroves.get((jobId, troveTuple), [0])[0]
self.buildingTroves[jobId, troveTuple] = [ mark, False ]
def _serveLoopHook(self):
if not self.buildingTroves:
return
for (jobId, troveTuple), (mark, tail) in self.buildingTroves.items():
if not tail:
continue
try:
moreData, data, mark = self.client.getTroveBuildLog(jobId,
troveTuple,
mark)
except:
moreData = True
data = ''
self.out.write(data)
if not moreData:
del self.buildingTroves[jobId, troveTuple]
else:
self.buildingTroves[jobId, troveTuple][0] = mark
def _jobTrovesSet(self, jobId, troveData):
self._msg('[%d] - job troves set' % jobId)
def _jobStateUpdated(self, jobId, state, status):
_AbstractDisplay._jobStateUpdated(self, jobId, state, status)
state = buildjob.stateNames[state]
if self._isFinished():
self._serveLoopHook()
self._msg('[%d] - State: %s' % (jobId, state))
if status:
self._msg('[%d] - %s' % (jobId, status))
def _jobLogUpdated(self, jobId, state, status):
self._msg('[%d] %s' % (jobId, status))
def _troveStateUpdated(self, (jobId, troveTuple), state, status):
isBuilding = (state in (buildtrove.TroveState.BUILDING,
buildtrove.TroveState.RESOLVING))
state = buildtrove.stateNames[state]
self._msg('[%d] - %s - State: %s' % (jobId, troveTuple[0], state))
if status:
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
if isBuilding and self.showBuildLogs:
self._tailBuildLog(jobId, troveTuple)
else:
self._stopTailing(jobId, troveTuple)
def _troveLogUpdated(self, (jobId, troveTuple), state, status):
state = buildtrove.stateNames[state]
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
def _trovePreparingChroot(self, (jobId, troveTuple), host, path):
if host == '_local_':
msg = 'Chroot at %s' % path
else:
msg = 'Chroot at Node %s:%s' % (host, path)
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], msg))
def _primeOutput(self, jobId):
logMark = 0
while True:
newLogs = self.client.getJobLogs(jobId, logMark)
if not newLogs:
break
logMark += len(newLogs)
for (timeStamp, message, args) in newLogs:
print '[%s] [%s] - %s' % (timeStamp, jobId, message)
BUILDING = buildtrove.TroveState.BUILDING
troveTups = self.client.listTrovesByState(jobId, BUILDING).get(BUILDING, [])
for troveTuple in troveTups:
self._tailBuildLog(jobId, troveTuple)
_AbstractDisplay._primeOutput(self, jobId)
def set_raw_mode():
fd = sys.stdin.fileno()
oldTerm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
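    # clear ICANON and ECHO in the local-modes field (index 3) so keystrokes
    # are delivered immediately and are not echoed back to the terminal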
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldFlags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldFlags | os.O_NONBLOCK)
return oldTerm, oldFlags
def restore_terminal(oldTerm, oldFlags):
fd = sys.stdin.fileno()
if oldTerm:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldTerm)
if oldFlags:
fcntl.fcntl(fd, fcntl.F_SETFL, oldFlags)
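# A minimal usage sketch for the two helpers above (hypothetical example):
#
#     termInfo = set_raw_mode()
#     try:
#         ...  # read single keystrokes from sys.stdin
#     finally:
#         restore_terminal(*termInfo)
#
# NOTE: the classes below redefine _AbstractDisplay, SilentDisplay and
# JobLogDisplay; since Python binds names in order, these later interactive
# versions are the ones in effect after import.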
class _AbstractDisplay(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
def __init__(self, client, showBuildLogs=True, out=None):
self.client = client
self.finished = False
self.showBuildLogs = showBuildLogs
self.troveStates = {}
self.troveIndex = None
        self.troveDisplay = False
self.out = OutBuffer(out)
def close(self):
pass
def _msg(self, msg, *args):
self.out.write('\r[%s] %s\n' % (time.strftime('%X'), msg))
self.out.write('(h for help)>')
self.out.flush()
def _jobStateUpdated(self, jobId, state, status):
isFinished = (state in (buildjob.JOB_STATE_FAILED,
buildjob.JOB_STATE_BUILT))
if isFinished:
self._setFinished()
def _setFinished(self):
self.finished = True
def _isFinished(self):
return self.finished
def _shouldExit(self):
return self._isFinished() and self.exitOnFinish
def _primeOutput(self, jobId):
job = self.client.getJob(jobId, withTroves=False)
if job.isFinished():
self._setFinished()
def _dispatch(self, methodname, (callData, responseHandler, args)):
if methodname.startswith('_'):
raise NoSuchMethodError(methodname)
else:
responseHandler.sendResponse('')
getattr(self, methodname)(*args)
class SilentDisplay(_AbstractDisplay):
def _updateBuildLog(self):
pass
class JobLogDisplay(_AbstractDisplay):
def __init__(self, client, state, out=None):
_AbstractDisplay.__init__(self, client, out)
self.troveToWatch = None
self.watchTroves = False
self.buildingTroves = {}
self.state = state
self.lastLen = 0
self.promptFormat = '%(jobId)s %(name)s%(context)s - %(state)s - (%(tailing)s) ([h]elp)>'
self.updatePrompt()
def close(self):
self.out.write('\n')
self.out.flush()
def _msg(self, msg, *args):
self.erasePrompt()
self.out.write('[%s] %s\n' % (time.strftime('%X'), msg))
self.writePrompt()
def updatePrompt(self):
if self.troveToWatch:
if self.troveToWatch not in self.state.troves:
self.troveToWatch = self.state.troves[0]
state = self.state.getTroveState(*self.troveToWatch)
state = buildtrove.stateNames[state]
name = self.troveToWatch[1][0].split(':', 1)[0] # remove :source
context = self.troveToWatch[1][3]
d = dict(jobId=self.troveToWatch[0], name=name, state=state,
context=(context and '{%s}' % context or ''))
else:
d = dict(jobId='(None)', name='(None)', state='', context='')
if not self.state.jobActive():
tailing = 'Job %s' % self.state.getJobStateName()
elif self.watchTroves:
tailing = 'Details on'
else:
tailing = 'Details off'
d['tailing'] = tailing
self.prompt = self.promptFormat % d
self.erasePrompt()
self.writePrompt()
def erasePrompt(self):
self.out.write('\r%s\r' % (' '*self.lastLen))
def writePrompt(self):
self.out.write(self.prompt)
self.lastLen = len(self.prompt)
self.out.flush()
def setWatchTroves(self, watchTroves=True):
self.watchTroves = watchTroves
self.updatePrompt()
def getWatchTroves(self):
return self.watchTroves
def setTroveToWatch(self, jobId, troveTuple):
self.troveToWatch = jobId, troveTuple
self.updatePrompt()
def _watchTrove(self, jobId, troveTuple):
if not self.watchTroves:
return False
return self.troveToWatch == (jobId, troveTuple)
def displayTroveStates(self):
if not self.troveToWatch:
return
self.erasePrompt()
job = self.client.getJob(self.troveToWatch[0])
query.displayTrovesByState(job, out=self.out)
self.writePrompt()
def setPrompt(self, promptFormat):
self.promptFormat = promptFormat
self.updatePrompt()
def updateBuildLog(self, jobId, troveTuple):
if not self._watchTrove(jobId, troveTuple):
return
mark = self.getMark(jobId, troveTuple)
if mark is None:
return
try:
moreData, data, mark = self.client.getTroveBuildLog(jobId,
troveTuple,
mark)
except:
return
if data and data != '\n':
self.erasePrompt()
if data[0] == '\n':
# we've already got a \n because we've cleared
# the prompt.
data = data[1:]
self.out.write(data)
if data[-1] != '\n':
self.out.write('\n')
self.writePrompt()
if not moreData:
mark = None
self.setMark(jobId, troveTuple, mark)
def getMark(self, jobId, troveTuple):
if (jobId, troveTuple) not in self.buildingTroves:
# display max 80 lines of back log
self.buildingTroves[jobId, troveTuple] = -80
return self.buildingTroves[jobId, troveTuple]
def setMark(self, jobId, troveTuple, mark):
self.buildingTroves[jobId, troveTuple] = mark
def _jobTrovesSet(self, jobId, troveList):
self._msg('[%d] - job troves set' % jobId)
self.troveToWatch = jobId, troveList[0]
self.updatePrompt()
def _jobStateUpdated(self, jobId, state, status):
_AbstractDisplay._jobStateUpdated(self, jobId, state, status)
state = buildjob.stateNames[state]
if self._isFinished() and self.troveToWatch:
self.updateBuildLog(*self.troveToWatch)
self._msg('[%d] - State: %s' % (jobId, state))
if status:
self._msg('[%d] - %s' % (jobId, status))
self.updatePrompt()
def _jobLogUpdated(self, jobId, state, status):
self._msg('[%d] %s' % (jobId, status))
def _troveStateUpdated(self, (jobId, troveTuple), state, status):
isBuilding = (state == buildtrove.TroveState.BUILDING)
state = buildtrove.stateNames[state]
if troveTuple[3]:
name = '%s{%s}' % (troveTuple[0], troveTuple[3])
else:
name = troveTuple[0]
self._msg('[%d] - %s - State: %s' % (jobId, name, state))
if status and self._watchTrove(jobId, troveTuple):
self._msg('[%d] - %s - %s' % (jobId, name, status))
self.updatePrompt()
def _troveLogUpdated(self, (jobId, troveTuple), state, status):
if self._watchTrove(jobId, troveTuple):
state = buildtrove.stateNames[state]
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
def _trovePreparingChroot(self, (jobId, troveTuple), host, path):
if not self._watchTrove(jobId, troveTuple):
return
if host == '_local_':
msg = 'Chroot at %s' % path
else:
msg = 'Chroot at Node %s:%s' % (host, path)
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], msg))
class OutBuffer(object):
def __init__(self, fd):
if fd is None:
fd = sys.stdout.fileno()
        elif not isinstance(fd, int):
            fd = fd.fileno()
self.fd = fd
self.data = []
def write(self, data):
self.data.append(data)
def fileno(self):
return self.fd
def flush(self):
while self.data:
self.check()
def check(self):
while self.data:
ready = select.select([], [self.fd], [], 0.1)[1]
if not ready:
return
rc = os.write(self.fd, self.data[0])
if rc < len(self.data[0]):
self.data[0] = self.data[0][rc:]
else:
self.data.pop(0)
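# Design note: OutBuffer queues writes and drains them through select() so a
# slow or blocked terminal cannot stall the event loop; flush() empties the
# queue completely, while check() gives up as soon as the fd stops being
# writable.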
class DisplayState(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
def __init__(self, client):
self.troves = []
self.states = {}
self.buildingTroves = {}
self.jobId = None
self.client = client
self.jobState = None
def _primeOutput(self, jobId):
#assert(not self.jobId)
self.jobId = jobId
job = self.client.getJob(jobId, withTroves=False)
self.jobState = job.state
if job.isBuilding() or job.isFinished() or job.isFailed():
self.updateTrovesForJob(jobId)
def jobActive(self):
return self.jobState in (
buildjob.JOB_STATE_STARTED,
buildjob.JOB_STATE_LOADING,
buildjob.JOB_STATE_LOADED,
buildjob.JOB_STATE_BUILD,
)
def getJobStateName(self):
if self.jobState is None:
return 'None'
return buildjob.stateNames[self.jobState]
def isBuilding(self, jobId, troveTuple):
return self.getTroveState(jobId, troveTuple) in (
buildtrove.TroveState.BUILDING,
buildtrove.TroveState.PREPARING,
buildtrove.TroveState.RESOLVING)
def isFailed(self, jobId, troveTuple):
# don't iterate through unbuildable - they are failures due to
# secondary causes.
return self.getTroveState(jobId, troveTuple) in (
buildtrove.TroveState.FAILED,)
def findTroveByName(self, troveName):
startsWith = None
for jobId, troveTuple in sorted(self.states):
if troveTuple[0].split(':', 1)[0] == troveName:
# exact matches take priority
return (jobId, troveTuple)
elif troveTuple[0].startswith(troveName) and startsWith is None:
startsWith = (jobId, troveTuple)
return startsWith
def getTroveState(self, jobId, troveTuple):
return self.states[jobId, troveTuple]
def getBuildingTroves(self):
return [ x[0] for x in self.states.iteritems()
if x[1] in (buildtrove.TroveState.BUILDING,
buildtrove.TroveState.RESOLVING) ]
def updateTrovesForJob(self, jobId):
self.troves = []
self.states = {}
for state, troveTupleList in self.client.listTrovesByState(jobId).items():
for troveTuple in troveTupleList:
self.troves.append((jobId, troveTuple))
self.states[jobId, troveTuple] = state
self.troves.sort()
def _troveStateUpdated(self, (jobId, troveTuple), state, status):
if (jobId, troveTuple) not in self.states:
self.updateTrovesForJob(jobId)
else:
self.states[jobId, troveTuple] = state
def _jobStateUpdated(self, jobId, state, status):
self.jobState = state
if self._isBuilding():
self.updateTrovesForJob(jobId)
def _jobTrovesSet(self, jobId, troveList):
self.updateTrovesForJob(jobId)
def _isBuilding(self):
return self.jobState in (buildjob.JOB_STATE_BUILD,
buildjob.JOB_STATE_STARTED)
def _isFinished(self):
return self.jobState in (
buildjob.JOB_STATE_FAILED, buildjob.JOB_STATE_BUILT)
class DisplayManager(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
displayClass = JobLogDisplay
stateClass = DisplayState
def __init__(self, client, showBuildLogs, out=None, exitOnFinish=None):
self.termInfo = set_raw_mode()
if out is None:
out = open('/dev/tty', 'w')
self.state = self.stateClass(client)
self.display = self.displayClass(client, self.state, out)
self.client = client
self.troveToWatch = None
self.troveIndex = 0
self.showBuildLogs = showBuildLogs
if exitOnFinish is None:
exitOnFinish = False
self.exitOnFinish = exitOnFinish
def _receiveEvents(self, *args, **kw):
methodname = '_receiveEvents'
method = getattr(self.state, methodname, None)
if method:
try:
method(*args)
except errors.uncatchableExceptions:
raise
except Exception, err:
print 'Error in handler: %s\n%s' % (err,
traceback.format_exc())
method = getattr(self.display, methodname, None)
if method:
try:
method(*args)
except errors.uncatchableExceptions:
raise
except Exception, err:
print 'Error in handler: %s\n%s' % (err,
traceback.format_exc())
return ''
def getCurrentTrove(self):
if self.state.troves:
return self.state.troves[self.troveIndex]
else:
return None
def _primeOutput(self, jobId):
self.state._primeOutput(jobId)
self.display._msg('Watching job %s' % jobId)
if self.getCurrentTrove():
self.displayTrove(*self.getCurrentTrove())
def displayTrove(self, jobId, troveTuple):
self.display.setTroveToWatch(jobId, troveTuple)
state = self.state.getTroveState(jobId, troveTuple)
state = buildtrove.stateNames[state]
def _serveLoopHook(self):
ready = select.select([sys.stdin], [], [], 0.1)[0]
if ready:
cmd = sys.stdin.read(1)
if cmd == '\x1b':
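                # arrow keys arrive as three-byte escape sequences (for
                # example '\x1b[C' for right); read the remaining two bytes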
cmd += sys.stdin.read(2)
if cmd == ' ':
self.do_switch_log()
elif cmd == 'n' or cmd == '\x1b[C':
self.do_next()
elif cmd == 'p' or cmd == '\x1b[D':
self.do_prev()
elif cmd == 'q':
sys.exit(0)
elif cmd == 'h':
self.do_help()
elif cmd == 'b':
self.do_next_building()
elif cmd == 'f':
self.do_next_failed()
elif cmd == 'i':
self.do_info()
elif cmd == 'l':
self.do_log()
elif cmd == 's':
self.do_status()
elif cmd == 'g':
self.do_goto()
if self.showBuildLogs:
for jobId, troveTuple in self.state.getBuildingTroves():
self.display.updateBuildLog(jobId, troveTuple)
def do_next(self):
if not self.state.troves:
return
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
if self.getCurrentTrove():
self.displayTrove(*self.getCurrentTrove())
def do_next_building(self):
if not self.state.troves:
return
startIndex = self.troveIndex
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
while (not self.state.isBuilding(*self.getCurrentTrove())
and self.troveIndex != startIndex):
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
if self.troveIndex != startIndex:
self.displayTrove(*self.getCurrentTrove())
def do_goto(self):
if not self.state.troves:
print 'No troves loaded yet'
return
self.display.erasePrompt()
restore_terminal(*self.termInfo)
try:
troveName = raw_input("\nName or part of name of trove: ")
troveInfo = self.state.findTroveByName(troveName)
if not troveInfo:
print 'No trove starting with "%s"' % troveName
self.display.writePrompt()
return
while not self.getCurrentTrove() == troveInfo:
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
self.displayTrove(*self.getCurrentTrove())
finally:
self.termInfo = set_raw_mode()
def do_next_failed(self):
if not self.state.troves:
return
startIndex = self.troveIndex
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
while (not self.state.isFailed(*self.getCurrentTrove())
and self.troveIndex != startIndex):
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
if self.troveIndex != startIndex:
self.displayTrove(*self.getCurrentTrove())
def do_prev(self):
if not self.state.troves:
return
self.troveIndex = (self.troveIndex - 1) % len(self.state.troves)
if self.getCurrentTrove():
self.displayTrove(*self.getCurrentTrove())
def do_info(self):
if not self.getCurrentTrove():
return
jobId, troveTuple = self.getCurrentTrove()
job = self.client.getJob(jobId)
trove = job.getTrove(*troveTuple)
dcfg = query.DisplayConfig(self.client, showTracebacks=True)
self.display.setWatchTroves(False)
self.display.erasePrompt()
query.displayTroveDetail(dcfg, job, trove, out=self.display.out)
self.display.writePrompt()
def do_log(self):
if not self.getCurrentTrove():
return
jobId, troveTuple = self.getCurrentTrove()
job = self.client.getJob(jobId)
trove = job.getTrove(*troveTuple)
moreData, data, mark = self.client.getTroveBuildLog(jobId,
troveTuple, 0)
if not data:
self.display._msg('No log yet.')
return
fd, path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(data)
try:
os.system('less %s' % path)
finally:
os.remove(path)
def do_help(self):
print
print "<space>: Turn on/off tailing of log"
print "<left>/<right>: move to next/prev trove in list"
print "b: move to next building trove"
print "f: move to next failed trove"
print "g: go to a particular trove"
print "h: print help"
print "i: display info for this trove"
print "l: display log for this trove in less"
print "q: quit"
print "s: display status on all troves"
def do_status(self):
self.display.setWatchTroves(False)
self.display.displayTroveStates()
def do_switch_log(self):
self.display.setWatchTroves(not self.display.getWatchTroves())
def _isFinished(self):
return self.display._isFinished()
def _shouldExit(self):
return self._isFinished() and self.exitOnFinish
def close(self):
self.display.close()
restore_terminal(*self.termInfo)
| sassoftware/rmake3 | rmake/cmdline/monitor.py | Python | apache-2.0 | 26,810 |
# A basic web server using sockets
import socket
PORT = 8090
MAX_OPEN_REQUESTS = 5
def process_client(clientsocket):
print(clientsocket)
data = clientsocket.recv(1024)
print(data)
web_contents = "<h1>Received</h1>"
f = open("myhtml.html", "r")
web_contents = f.read()
f.close()
web_headers = "HTTP/1.1 200"
web_headers += "\n" + "Content-Type: text/html"
web_headers += "\n" + "Content-Length: %i" % len(str.encode(web_contents))
clientsocket.send(str.encode(web_headers + "\n\n" + web_contents))
clientsocket.close()
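# A quick way to exercise the server by hand (a sketch; assumes the server
# below is already listening on this machine):
#
#     import socket
#     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     s.connect(("127.0.0.1", PORT))
#     s.send(str.encode("GET / HTTP/1.1\r\n\r\n"))
#     print(s.recv(4096))
#     s.close()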
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
# Better: use the local interface address explicitly
hostname = "10.10.104.17"
try:
serversocket.bind((ip, PORT))
# become a server socket
# MAX_OPEN_REQUESTS connect requests before refusing outside connections
serversocket.listen(MAX_OPEN_REQUESTS)
while True:
# accept connections from outside
print ("Waiting for connections at %s %i" % (hostname, PORT))
(clientsocket, address) = serversocket.accept()
# now do something with the clientsocket
# in this case, we'll pretend this is a non threaded server
process_client(clientsocket)
except socket.error:
print("Problemas using port %i. Do you have permission?" % PORT)
| acs-test/openfda | PER_2017-18/clientServer/P1/server_web.py | Python | apache-2.0 | 1,505 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .inventory import (
GetInventoryRequest,
Inventory,
ListInventoriesRequest,
ListInventoriesResponse,
InventoryView,
)
from .os_policy import OSPolicy
from .os_policy_assignment_reports import (
GetOSPolicyAssignmentReportRequest,
ListOSPolicyAssignmentReportsRequest,
ListOSPolicyAssignmentReportsResponse,
OSPolicyAssignmentReport,
)
from .os_policy_assignments import (
CreateOSPolicyAssignmentRequest,
DeleteOSPolicyAssignmentRequest,
GetOSPolicyAssignmentRequest,
ListOSPolicyAssignmentRevisionsRequest,
ListOSPolicyAssignmentRevisionsResponse,
ListOSPolicyAssignmentsRequest,
ListOSPolicyAssignmentsResponse,
OSPolicyAssignment,
OSPolicyAssignmentOperationMetadata,
UpdateOSPolicyAssignmentRequest,
)
from .osconfig_common import FixedOrPercent
from .patch_deployments import (
CreatePatchDeploymentRequest,
DeletePatchDeploymentRequest,
GetPatchDeploymentRequest,
ListPatchDeploymentsRequest,
ListPatchDeploymentsResponse,
MonthlySchedule,
OneTimeSchedule,
PatchDeployment,
PausePatchDeploymentRequest,
RecurringSchedule,
ResumePatchDeploymentRequest,
UpdatePatchDeploymentRequest,
WeekDayOfMonth,
WeeklySchedule,
)
from .patch_jobs import (
AptSettings,
CancelPatchJobRequest,
ExecStep,
ExecStepConfig,
ExecutePatchJobRequest,
GcsObject,
GetPatchJobRequest,
GooSettings,
Instance,
ListPatchJobInstanceDetailsRequest,
ListPatchJobInstanceDetailsResponse,
ListPatchJobsRequest,
ListPatchJobsResponse,
PatchConfig,
PatchInstanceFilter,
PatchJob,
PatchJobInstanceDetails,
PatchRollout,
WindowsUpdateSettings,
YumSettings,
ZypperSettings,
)
from .vulnerability import (
CVSSv3,
GetVulnerabilityReportRequest,
ListVulnerabilityReportsRequest,
ListVulnerabilityReportsResponse,
VulnerabilityReport,
)
__all__ = (
"GetInventoryRequest",
"Inventory",
"ListInventoriesRequest",
"ListInventoriesResponse",
"InventoryView",
"OSPolicy",
"GetOSPolicyAssignmentReportRequest",
"ListOSPolicyAssignmentReportsRequest",
"ListOSPolicyAssignmentReportsResponse",
"OSPolicyAssignmentReport",
"CreateOSPolicyAssignmentRequest",
"DeleteOSPolicyAssignmentRequest",
"GetOSPolicyAssignmentRequest",
"ListOSPolicyAssignmentRevisionsRequest",
"ListOSPolicyAssignmentRevisionsResponse",
"ListOSPolicyAssignmentsRequest",
"ListOSPolicyAssignmentsResponse",
"OSPolicyAssignment",
"OSPolicyAssignmentOperationMetadata",
"UpdateOSPolicyAssignmentRequest",
"FixedOrPercent",
"CreatePatchDeploymentRequest",
"DeletePatchDeploymentRequest",
"GetPatchDeploymentRequest",
"ListPatchDeploymentsRequest",
"ListPatchDeploymentsResponse",
"MonthlySchedule",
"OneTimeSchedule",
"PatchDeployment",
"PausePatchDeploymentRequest",
"RecurringSchedule",
"ResumePatchDeploymentRequest",
"UpdatePatchDeploymentRequest",
"WeekDayOfMonth",
"WeeklySchedule",
"AptSettings",
"CancelPatchJobRequest",
"ExecStep",
"ExecStepConfig",
"ExecutePatchJobRequest",
"GcsObject",
"GetPatchJobRequest",
"GooSettings",
"Instance",
"ListPatchJobInstanceDetailsRequest",
"ListPatchJobInstanceDetailsResponse",
"ListPatchJobsRequest",
"ListPatchJobsResponse",
"PatchConfig",
"PatchInstanceFilter",
"PatchJob",
"PatchJobInstanceDetails",
"PatchRollout",
"WindowsUpdateSettings",
"YumSettings",
"ZypperSettings",
"CVSSv3",
"GetVulnerabilityReportRequest",
"ListVulnerabilityReportsRequest",
"ListVulnerabilityReportsResponse",
"VulnerabilityReport",
)
| googleapis/python-os-config | google/cloud/osconfig_v1/types/__init__.py | Python | apache-2.0 | 4,373 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.chronos.detector.anomaly.ae_detector import AEDetector
class TestAEDetector(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def create_data(self):
cycles = 10
time = np.arange(0, cycles * np.pi, 0.01)
data = np.sin(time)
data[600:800] = 10
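        # the flat run at indices 600:800 is the injected anomaly that the
        # detector is expected to flag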
return data
def test_ae_fit_score_rolled_keras(self):
y = self.create_data()
ad = AEDetector(roll_len=314)
ad.fit(y)
anomaly_scores = ad.score()
assert len(anomaly_scores) == len(y)
anomaly_indexes = ad.anomaly_indexes()
assert len(anomaly_indexes) == int(ad.ratio * len(y))
def test_ae_fit_score_rolled_pytorch(self):
y = self.create_data()
ad = AEDetector(roll_len=314, backend="torch")
ad.fit(y)
anomaly_scores = ad.score()
assert len(anomaly_scores) == len(y)
anomaly_indexes = ad.anomaly_indexes()
assert len(anomaly_indexes) == int(ad.ratio * len(y))
def test_ae_fit_score_unrolled(self):
y = self.create_data()
ad = AEDetector(roll_len=0)
ad.fit(y)
anomaly_scores = ad.score()
assert len(anomaly_scores) == len(y)
anomaly_indexes = ad.anomaly_indexes()
assert len(anomaly_indexes) == int(ad.ratio * len(y))
def test_corner_cases(self):
y = self.create_data()
ad = AEDetector(roll_len=314, backend="dummy")
with pytest.raises(ValueError):
ad.fit(y)
ad = AEDetector(roll_len=314)
with pytest.raises(RuntimeError):
ad.score()
y = np.array([1])
with pytest.raises(ValueError):
ad.fit(y)
y = self.create_data()
y = y.reshape(2, -1)
with pytest.raises(ValueError):
ad.fit(y)
| intel-analytics/analytics-zoo | pyzoo/test/zoo/chronos/detector/anomaly/test_ae_detector.py | Python | apache-2.0 | 2,541 |
from __future__ import absolute_import, unicode_literals
import os
from django import VERSION as DJANGO_VERSION
from django.utils.translation import ugettext_lazy as _
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "media-library"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '111.222.333.444']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
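# (0o644: owner read/write; group and others read-only)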
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "cloudSolarDB",
# Not used with sqlite3.
"USER": "valia",
# Not used with sqlite3.
"PASSWORD": "scenetwork",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "localhost",
# Set to empty string for default. Not used with sqlite3.
"PORT": "5432",
}
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_ROOT, "templates")
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.static",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
],
"builtins": [
"mezzanine.template.loader_tags",
],
},
},
]
if DJANGO_VERSION < (1, 9):
del TEMPLATES[0]["OPTIONS"]["builtins"]
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
# "mezzanine.accounts",
# "mezzanine.mobile",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
import sys
import imp
module_name = "%s.local_settings" % PROJECT_APP
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
| nikdval/cloudSolar | solarApp/solar/settings.py | Python | artistic-2.0 | 11,845 |
import pyautogui, win32api, win32con, ctypes, autoit
from PIL import ImageOps, Image, ImageGrab
from numpy import *
import os
import time
import cv2
import random
from Bot import *
def main():
bot = Bot()
autoit.win_wait(bot.title, 5)
counter = 0
    potionUse = 0
cycle = True
fullCounter = 0
while cycle:
hpstatus = bot.checkOwnHp()
print 'hp ' + str(hpstatus)
if hpstatus == 0:
autoit.control_send(bot.title, '', '{F9}', 0)
bot.sleep(0.3,0.6)
print 'Dead'
cv2.imwrite('Dead' + str(int(time.time())) + '.png',bot.getScreen(leftCornerx,leftCornery,x2,fullY2))
cycle = False
if hpstatus == 1:
            if potionUse == 0:
                autoit.control_send(bot.title, '', '{F10}', 0)
            potionUse += 1
            if potionUse > 5:
                potionUse = 0
        else:
            potionUse = 0
res = bot.findHP();
print 'tgs ' + str(res)
if res == 3:
fullCounter += 1
print 'fc ' + str(fullCounter)
autoit.control_send(bot.title, '', '{F1}', 0)
else:
fullCounter = 0
if fullCounter > 4:
autoit.control_send(bot.title, '', '{ESC}', 0)
bot.sleep(0.3,0.6)
autoit.control_send(bot.title, '', '{F3}', 0)
bot.sleep(0.1,0.3)
autoit.control_send(bot.title, '', '{F1}', 0)
# bot.mouseRotate()
fullCounter = 0
if res > 0:
autoit.control_send(bot.title, '', '{F1}', 0)
counter = 0
if res == 1 or res == 3:
bot.sleep(0.3,0.6)
if res > 1 and res < 3:
bot.sleep(1,3)
if res == 1:
autoit.control_send(bot.title, '', '{F3}', 0)
bot.sleep(0.3,0.6)
autoit.control_send(bot.title, '', '{F2}', 0)
bot.sleep(0.3,0.6)
autoit.control_send(bot.title, '', '{F1}', 0)
else:
fullCounter = 0
if counter < 3:
autoit.control_send(bot.title, '', '{F3}', 0)
bot.sleep(0.5,0.8)
autoit.control_send(bot.title, '', '{F1}', 0)
print 'F3'
if counter > 2:
# bot.findTarget()
autoit.control_send(bot.title, '', '{F7}', 0)
# if counter > 3:
# autoit.control_send(bot.title, '', '{F8}', 0)
# counter = 0
counter += 1
print 'cnt ' + str(counter)
pass
if __name__ == '__main__':
main()
| oyajiro/l2bot | hf/wl.py | Python | artistic-2.0 | 2,661 |
# Even Fibonacci numbers
# Problem 2
# Each new term in the Fibonacci sequence is generated by adding the
# previous two terms. By starting with 1 and 2, the first 10 terms will be:
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
# By considering the terms in the Fibonacci sequence whose values do not exceed
# four million, find the sum of the even-valued terms.
def fibs():
previous, current = 0, 1
while True:
previous, current = current, previous + current
yield current
def problem2(bound):
sum = 0
for n in fibs():
if n >= bound:
break
if n % 2 == 0:
sum += n
return sum
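# Aside: the even Fibonacci numbers are every third term (2, 8, 34, ...), so
# an equivalent and faster recurrence is E(n) = 4*E(n-1) + E(n-2), with
# E(1) = 2 and E(2) = 8.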
print problem2(4000000) | nabilhassein/project-euler | p2.py | Python | bsd-2-clause | 638 |
from click.testing import CliRunner
from twelve_tone.cli import main
def test_main():
runner = CliRunner()
result = runner.invoke(main, [])
assert result.exit_code == 0
| accraze/python-twelve-tone | tests/test_twelve_tone.py | Python | bsd-2-clause | 186 |
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest, CUDATestCase
def generate_input(n):
A = np.array(np.arange(n * n).reshape(n, n), dtype=np.float32)
B = np.array(np.arange(n) + 0, dtype=A.dtype)
return A, B
class TestCudaNonDet(CUDATestCase):
def test_for_pre(self):
"""Test issue with loop not running due to bad sign-extension at the for loop
precondition.
"""
@cuda.jit(argtypes=[float32[:, :], float32[:, :], float32[:]])
def diagproduct(c, a, b):
startX, startY = cuda.grid(2)
gridX = cuda.gridDim.x * cuda.blockDim.x
gridY = cuda.gridDim.y * cuda.blockDim.y
height = c.shape[0]
width = c.shape[1]
for x in range(startX, width, (gridX)):
for y in range(startY, height, (gridY)):
c[y, x] = a[y, x] * b[x]
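        # the kernel computes C = A @ diag(B): each column x of A is scaled
        # by B[x], using a grid-stride loop over both dimensions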
N = 8
A, B = generate_input(N)
F = np.empty(A.shape, dtype=A.dtype)
blockdim = (32, 8)
griddim = (1, 1)
dA = cuda.to_device(A)
dB = cuda.to_device(B)
dF = cuda.to_device(F, copy=False)
diagproduct[griddim, blockdim](dF, dA, dB)
E = np.dot(A, np.diag(B))
np.testing.assert_array_almost_equal(dF.copy_to_host(), E)
if __name__ == '__main__':
unittest.main()
| sklam/numba | numba/cuda/tests/cudapy/test_nondet.py | Python | bsd-2-clause | 1,378 |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name = 'wechat-python-sdk',
version = '0.5.7',
keywords = ('wechat', 'sdk', 'wechat sdk'),
    description = u'微信公众平台Python开发包',  # i.e. "Python SDK for the WeChat Official Account Platform"
long_description = open("README.rst").read(),
license = 'BSD License',
url = 'https://github.com/doraemonext/wechat-python-sdk',
author = 'doraemonext',
author_email = '[email protected]',
packages = find_packages(),
include_package_data = True,
platforms = 'any',
install_requires=open("requirements.txt").readlines(),
)
| Beeblio/wechat-python-sdk | setup.py | Python | bsd-2-clause | 622 |
import sys
import itertools
from functools import reduce
from operator import iadd
import numpy
from PyQt4.QtGui import (
QFormLayout, QGraphicsRectItem, QGraphicsGridLayout,
QFontMetrics, QPen, QIcon, QPixmap, QLinearGradient, QPainter, QColor,
QBrush, QTransform, QGraphicsWidget, QApplication
)
from PyQt4.QtCore import Qt, QRect, QRectF, QSize, QPointF
from PyQt4.QtCore import pyqtSignal as Signal
import pyqtgraph as pg
import Orange.data
import Orange.misc
from Orange.clustering import hierarchical
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import itemmodels, colorbrewer
from .owhierarchicalclustering import DendrogramWidget, GraphicsSimpleTextList
from Orange.widgets.io import FileFormat
def _remove_item(item):
item.setParentItem(None)
scene = item.scene()
if scene is not None:
scene.removeItem(item)
class DistanceMapItem(pg.ImageItem):
"""A distance matrix image with user selectable regions.
"""
class SelectionRect(QGraphicsRectItem):
def boundingRect(self):
return super().boundingRect().adjusted(-1, -1, 1, 1)
def paint(self, painter, option, widget=None):
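            # paint in device coordinates: map the rect through the current
            # transform and then reset it, so the red outline keeps a
            # constant pen width at any zoom level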
t = painter.transform()
rect = t.mapRect(self.rect())
painter.save()
painter.setTransform(QTransform())
pwidth = self.pen().widthF()
painter.setPen(self.pen())
painter.drawRect(rect.adjusted(pwidth, -pwidth, -pwidth, pwidth))
painter.restore()
selectionChanged = Signal()
Clear, Select, Commit = 1, 2, 4
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setAcceptedMouseButtons(Qt.LeftButton | Qt.RightButton)
self.setAcceptHoverEvents(True)
self.__selections = []
#: (QGraphicsRectItem, QRectF) | None
self.__dragging = None
def __select(self, area, command):
if command & self.Clear:
self.__clearSelections()
if command & self.Select:
area = area.normalized()
def partition(predicate, iterable):
t1, t2 = itertools.tee(iterable)
return (itertools.filterfalse(predicate, t1),
filter(predicate, t2))
def intersects(selection):
_, selarea = selection
return selarea.intersects(area)
disjoint, intersection = partition(intersects, self.__selections)
disjoint = list(disjoint)
intersection = list(intersection)
# merge intersecting selections into a single area
area = reduce(QRect.united, (area for _, area in intersection),
area)
visualarea = self.__visualRectForSelection(area)
item = DistanceMapItem.SelectionRect(visualarea, self)
item.setPen(QPen(Qt.red, 0))
selection = disjoint + [(item, area)]
for item, _ in intersection:
_remove_item(item)
self.__selections = selection
self.selectionChanged.emit()
def __elastic_band_select(self, area, command):
if command & self.Clear and self.__dragging:
item, area = self.__dragging
_remove_item(item)
self.__dragging = None
if command & self.Select:
if self.__dragging:
item, _ = self.__dragging
else:
item = DistanceMapItem.SelectionRect(self)
item.setPen(QPen(Qt.red, 0))
# intersection with existing regions
intersection = [(item, selarea)
for item, selarea in self.__selections
if area.intersects(selarea)]
fullarea = reduce(
QRect.united, (selarea for _, selarea in intersection),
area
)
visualarea = self.__visualRectForSelection(fullarea)
item.setRect(visualarea)
self.__dragging = item, area
if command & self.Commit and self.__dragging:
item, area = self.__dragging
self.__select(area, self.Select)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
r, c = self._cellAt(event.pos())
if r != -1 and c != -1:
# Clear existing selection
# TODO: Fix extended selection.
self.__select(QRect(), self.Clear)
selrange = QRect(c, r, 1, 1)
self.__elastic_band_select(selrange, self.Select | self.Clear)
elif event.button() == Qt.RightButton:
self.__select(QRect(), self.Clear)
super().mousePressEvent(event)
event.accept()
def mouseMoveEvent(self, event):
if event.buttons() & Qt.LeftButton and self.__dragging:
r1, c1 = self._cellAt(event.buttonDownPos(Qt.LeftButton))
r2, c2 = self._cellCloseTo(event.pos())
selrange = QRect(c1, r1, 1, 1).united(QRect(c2, r2, 1, 1))
self.__elastic_band_select(selrange, self.Select)
super().mouseMoveEvent(event)
event.accept()
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton and self.__dragging:
r1, c1 = self._cellAt(event.buttonDownPos(Qt.LeftButton))
r2, c2 = self._cellCloseTo(event.pos())
selrange = QRect(c1, r1, 1, 1).united(QRect(c2, r2, 1, 1))
self.__elastic_band_select(selrange, self.Select | self.Commit)
self.__elastic_band_select(QRect(), self.Clear)
super().mouseReleaseEvent(event)
event.accept()
def _cellAt(self, pos):
"""Return the i, j cell index at `pos` in local coordinates."""
if self.image is None:
return -1, -1
else:
h, w = self.image.shape
i, j = numpy.floor([pos.y(), pos.x()])
if 0 <= i < h and 0 <= j < w:
return int(i), int(j)
else:
return -1, -1
def _cellCloseTo(self, pos):
"""Return the i, j cell index closest to `pos` in local coordinates."""
if self.image is None:
return -1, -1
else:
h, w = self.image.shape
i, j = numpy.floor([pos.y(), pos.x()])
i = numpy.clip(i, 0, h - 1)
j = numpy.clip(j, 0, w - 1)
return int(i), int(j)
def __clearSelections(self):
for item, _ in self.__selections:
_remove_item(item)
self.__selections = []
def __visualRectForSelection(self, rect):
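        # each cell is a unit square, so a cell range [r1..r2] x [c1..c2]
        # maps to the visual rectangle from (c1, r1) to (c2 + 1, r2 + 1)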
h, w = self.image.shape
rect = rect.normalized()
rect = rect.intersected(QRect(0, 0, w, h))
r1, r2 = rect.top(), rect.bottom() + 1
c1, c2 = rect.left(), rect.right() + 1
return QRectF(QPointF(c1, r1), QPointF(c2, r2))
def __selectionForArea(self, area):
r1, c1 = self._cellAt(area.topLeft())
r2, c2 = self._cellAt(area.bottomRight())
selarea = QRect(c1, r1, c2 - c1 + 1, r2 - r1 + 1)
return selarea.normalized()
def selections(self):
selections = [self.__selectionForArea(area)
for _, area in self.__selections]
return [(range(r.top(), r.bottom() + 1),
range(r.left(), r.right() + 1))
for r in selections]
def hoverMoveEvent(self, event):
super().hoverMoveEvent(event)
i, j = self._cellAt(event.pos())
if i != -1 and j != -1:
d = self.image[i, j]
self.setToolTip("{}, {}: {:.3f}".format(i, j, d))
else:
self.setToolTip("")
_color_palettes = sorted(colorbrewer.colorSchemes["sequential"].items()) + \
[("Blue-Yellow", {2: [(0, 0, 255), (255, 255, 0)]})]
_default_colormap_index = len(_color_palettes) - 1
class OWDistanceMap(widget.OWWidget):
name = "Distance Map"
description = "Visualize a distance matrix."
icon = "icons/DistanceMap.svg"
priority = 1200
inputs = [("Distances", Orange.misc.DistMatrix, "set_distances")]
outputs = [("Data", Orange.data.Table), ("Features", widget.AttributeList)]
settingsHandler = settings.PerfectDomainContextHandler()
#: type of ordering to apply to matrix rows/columns
NoOrdering, Clustering, OrderedClustering = 0, 1, 2
sorting = settings.Setting(NoOrdering)
colormap = settings.Setting(_default_colormap_index)
color_gamma = settings.Setting(0.0)
color_low = settings.Setting(0.0)
color_high = settings.Setting(1.0)
annotation_idx = settings.ContextSetting(0, exclude_metas=False)
autocommit = settings.Setting(True)
graph_name = "grid_widget"
# Disable clustering for inputs bigger than this
_MaxClustering = 3000
# Disable cluster leaf ordering for inputs bigger than this
_MaxOrderedClustering = 1000
def __init__(self):
super().__init__()
self.matrix = None
self._tree = None
self._ordered_tree = None
self._sorted_matrix = None
self._sort_indices = None
self._selection = None
box = gui.widgetBox(self.controlArea, "Element sorting", margin=0)
self.sorting_cb = gui.comboBox(
box, self, "sorting",
items=["None", "Clustering", "Clustering with ordered leaves"],
callback=self._invalidate_ordering)
box = gui.widgetBox(self.controlArea, "Colors")
self.colormap_cb = gui.comboBox(
box, self, "colormap", callback=self._update_color
)
self.colormap_cb.setIconSize(QSize(64, 16))
self.palettes = list(_color_palettes)
init_color_combo(self.colormap_cb, self.palettes, QSize(64, 16))
self.colormap_cb.setCurrentIndex(self.colormap)
form = QFormLayout(
formAlignment=Qt.AlignLeft,
labelAlignment=Qt.AlignLeft,
fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow
)
# form.addRow(
# "Gamma",
# gui.hSlider(box, self, "color_gamma", minValue=0.0, maxValue=1.0,
# step=0.05, ticks=True, intOnly=False,
# createLabel=False, callback=self._update_color)
# )
form.addRow(
"Low",
gui.hSlider(box, self, "color_low", minValue=0.0, maxValue=1.0,
step=0.05, ticks=True, intOnly=False,
createLabel=False, callback=self._update_color)
)
form.addRow(
"High",
gui.hSlider(box, self, "color_high", minValue=0.0, maxValue=1.0,
step=0.05, ticks=True, intOnly=False,
createLabel=False, callback=self._update_color)
)
box.layout().addLayout(form)
box = gui.widgetBox(self.controlArea, "Annotations")
self.annot_combo = gui.comboBox(box, self, "annotation_idx",
callback=self._invalidate_annotations,
contentsLength=12)
self.annot_combo.setModel(itemmodels.VariableListModel())
self.annot_combo.model()[:] = ["None", "Enumeration"]
self.controlArea.layout().addStretch()
gui.auto_commit(self.controlArea, self, "autocommit",
"Send data", "Auto send is on")
self.inline_graph_report()
self.view = pg.GraphicsView(background="w")
self.mainArea.layout().addWidget(self.view)
self.grid_widget = pg.GraphicsWidget()
self.grid = QGraphicsGridLayout()
self.grid_widget.setLayout(self.grid)
self.viewbox = pg.ViewBox(enableMouse=False, enableMenu=False)
self.viewbox.setAcceptedMouseButtons(Qt.NoButton)
self.viewbox.setAcceptHoverEvents(False)
self.grid.addItem(self.viewbox, 1, 1)
self.left_dendrogram = DendrogramWidget(
self.grid_widget, orientation=DendrogramWidget.Left,
selectionMode=DendrogramWidget.NoSelection,
hoverHighlightEnabled=False
)
self.left_dendrogram.setAcceptedMouseButtons(Qt.NoButton)
self.left_dendrogram.setAcceptHoverEvents(False)
self.top_dendrogram = DendrogramWidget(
self.grid_widget, orientation=DendrogramWidget.Top,
selectionMode=DendrogramWidget.NoSelection,
hoverHighlightEnabled=False
)
self.top_dendrogram.setAcceptedMouseButtons(Qt.NoButton)
self.top_dendrogram.setAcceptHoverEvents(False)
self.grid.addItem(self.left_dendrogram, 1, 0)
self.grid.addItem(self.top_dendrogram, 0, 1)
self.right_labels = TextList(
alignment=Qt.AlignLeft)
self.bottom_labels = TextList(
orientation=Qt.Horizontal, alignment=Qt.AlignRight)
self.grid.addItem(self.right_labels, 1, 2)
self.grid.addItem(self.bottom_labels, 2, 1)
self.view.setCentralItem(self.grid_widget)
self.left_dendrogram.hide()
self.top_dendrogram.hide()
self.right_labels.hide()
self.bottom_labels.hide()
self.matrix_item = None
self.dendrogram = None
self.grid_widget.scene().installEventFilter(self)
def set_distances(self, matrix):
self.closeContext()
self.clear()
self.error(0)
if matrix is not None:
N, _ = matrix.shape
if N < 2:
self.error(0, "Empty distance matrix.")
matrix = None
self.matrix = matrix
if matrix is not None:
self.set_items(matrix.row_items, matrix.axis)
else:
self.set_items(None)
if matrix is not None:
N, _ = matrix.shape
else:
N = 0
model = self.sorting_cb.model()
item = model.item(2)
msg = None
if N > OWDistanceMap._MaxOrderedClustering:
item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
if self.sorting == OWDistanceMap.OrderedClustering:
self.sorting = OWDistanceMap.Clustering
msg = "Cluster ordering was disabled due to the input " \
"matrix being to big"
else:
item.setFlags(item.flags() | Qt.ItemIsEnabled)
item = model.item(1)
if N > OWDistanceMap._MaxClustering:
item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
if self.sorting == OWDistanceMap.Clustering:
self.sorting = OWDistanceMap.NoOrdering
msg = "Clustering was disabled due to the input " \
"matrix being to big"
else:
item.setFlags(item.flags() | Qt.ItemIsEnabled)
self.information(1, msg)
def set_items(self, items, axis=1):
self.items = items
model = self.annot_combo.model()
if items is None:
model[:] = ["None", "Enumeration"]
elif not axis:
model[:] = ["None", "Enumeration", "Attribute names"]
elif isinstance(items, Orange.data.Table):
annot_vars = list(items.domain) + list(items.domain.metas)
model[:] = ["None", "Enumeration"] + annot_vars
self.annotation_idx = 0
self.openContext(items.domain)
elif isinstance(items, list) and \
all(isinstance(item, Orange.data.Variable) for item in items):
model[:] = ["None", "Enumeration", "Name"]
else:
model[:] = ["None", "Enumeration"]
self.annotation_idx = min(self.annotation_idx, len(model) - 1)
def clear(self):
self.matrix = None
self.cluster = None
self._tree = None
self._ordered_tree = None
self._sorted_matrix = None
self._selection = []
self._clear_plot()
def handleNewSignals(self):
if self.matrix is not None:
self._update_ordering()
self._setup_scene()
self._update_labels()
self.unconditional_commit()
def _clear_plot(self):
def remove(item):
item.setParentItem(None)
item.scene().removeItem(item)
if self.matrix_item is not None:
self.matrix_item.selectionChanged.disconnect(
self._invalidate_selection)
remove(self.matrix_item)
self.matrix_item = None
self._set_displayed_dendrogram(None)
self._set_labels(None)
def _cluster_tree(self):
if self._tree is None:
self._tree = hierarchical.dist_matrix_clustering(self.matrix)
return self._tree
def _ordered_cluster_tree(self):
if self._ordered_tree is None:
tree = self._cluster_tree()
self._ordered_tree = \
hierarchical.optimal_leaf_ordering(tree, self.matrix)
return self._ordered_tree
def _setup_scene(self):
self._clear_plot()
self.matrix_item = DistanceMapItem(self._sorted_matrix)
# Scale the y axis to compensate for pg.ViewBox's y axis invert
self.matrix_item.scale(1, -1)
self.viewbox.addItem(self.matrix_item)
# Set fixed view box range.
h, w = self._sorted_matrix.shape
self.viewbox.setRange(QRectF(0, -h, w, h), padding=0)
self.matrix_item.selectionChanged.connect(self._invalidate_selection)
if self.sorting == OWDistanceMap.NoOrdering:
tree = None
elif self.sorting == OWDistanceMap.Clustering:
tree = self._cluster_tree()
elif self.sorting == OWDistanceMap.OrderedClustering:
tree = self._ordered_cluster_tree()
self._set_displayed_dendrogram(tree)
self._update_color()
def _set_displayed_dendrogram(self, root):
self.left_dendrogram.set_root(root)
self.top_dendrogram.set_root(root)
self.left_dendrogram.setVisible(root is not None)
self.top_dendrogram.setVisible(root is not None)
constraint = 0 if root is None else -1 # 150
self.left_dendrogram.setMaximumWidth(constraint)
self.top_dendrogram.setMaximumHeight(constraint)
def _invalidate_ordering(self):
self._sorted_matrix = None
if self.matrix is not None:
self._update_ordering()
self._setup_scene()
def _update_ordering(self):
if self.sorting == OWDistanceMap.NoOrdering:
self._sorted_matrix = self.matrix
self._sort_indices = None
else:
if self.sorting == OWDistanceMap.Clustering:
tree = self._cluster_tree()
elif self.sorting == OWDistanceMap.OrderedClustering:
tree = self._ordered_cluster_tree()
leaves = hierarchical.leaves(tree)
indices = numpy.array([leaf.value.index for leaf in leaves])
X = self.matrix
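            # Symmetric permutation: reorder rows and columns by the same
            # dendrogram leaf order so the diagonal stays on the diagonal.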
self._sorted_matrix = X[indices[:, numpy.newaxis],
indices[numpy.newaxis, :]]
self._sort_indices = indices
def _invalidate_annotations(self):
if self.matrix is not None:
self._update_labels()
    def _update_labels(self):
if self.annotation_idx == 0:
labels = None
elif self.annotation_idx == 1:
labels = [str(i + 1) for i in range(self.matrix.shape[0])]
elif self.annot_combo.model()[self.annotation_idx] == "Attribute names":
attr = self.matrix.row_items.domain.attributes
labels = [str(attr[i]) for i in range(self.matrix.shape[0])]
elif self.annotation_idx == 2 and \
isinstance(self.items, widget.AttributeList):
labels = [v.name for v in self.items]
elif isinstance(self.items, Orange.data.Table):
var = self.annot_combo.model()[self.annotation_idx]
column, _ = self.items.get_column_view(var)
labels = [var.repr_val(value) for value in column]
self._set_labels(labels)
def _set_labels(self, labels):
self._labels = labels
if labels and self.sorting != OWDistanceMap.NoOrdering:
sortind = self._sort_indices
labels = [labels[i] for i in sortind]
for textlist in [self.right_labels, self.bottom_labels]:
textlist.set_labels(labels or [])
textlist.setVisible(bool(labels))
constraint = -1 if labels else 0
self.right_labels.setMaximumWidth(constraint)
self.bottom_labels.setMaximumHeight(constraint)
def _update_color(self):
if self.matrix_item:
name, colors = self.palettes[self.colormap]
n, colors = max(colors.items())
colors = numpy.array(colors, dtype=numpy.ubyte)
low, high = self.color_low * 255, self.color_high * 255
points = numpy.linspace(low, high, n)
space = numpy.linspace(0, 255, 255)
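            # Per channel, numpy.interp maps the 0-255 display scale onto the
            # palette's control points spread over [low, high]; the left/right
            # fills clamp out-of-range values to 255 (white) or 0 (black).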
r = numpy.interp(space, points, colors[:, 0], left=255, right=0)
g = numpy.interp(space, points, colors[:, 1], left=255, right=0)
b = numpy.interp(space, points, colors[:, 2], left=255, right=0)
colortable = numpy.c_[r, g, b]
self.matrix_item.setLookupTable(colortable)
def _invalidate_selection(self):
ranges = self.matrix_item.selections()
ranges = reduce(iadd, ranges, [])
indices = reduce(iadd, ranges, [])
if self.sorting != OWDistanceMap.NoOrdering:
sortind = self._sort_indices
indices = [sortind[i] for i in indices]
self._selection = list(sorted(set(indices)))
self.commit()
def commit(self):
datasubset = None
featuresubset = None
if not self._selection:
pass
elif isinstance(self.items, Orange.data.Table):
indices = self._selection
if self.matrix.axis == 1:
datasubset = self.items.from_table_rows(self.items, indices)
elif self.matrix.axis == 0:
domain = Orange.data.Domain(
[self.items.domain[i] for i in indices],
self.items.domain.class_vars,
self.items.domain.metas)
datasubset = Orange.data.Table.from_table(domain, self.items)
elif isinstance(self.items, widget.AttributeList):
subset = [self.items[i] for i in self._selection]
featuresubset = widget.AttributeList(subset)
self.send("Data", datasubset)
self.send("Features", featuresubset)
def onDeleteWidget(self):
super().onDeleteWidget()
self.clear()
def send_report(self):
annot = self.annot_combo.currentText()
if self.annotation_idx <= 1:
annot = annot.lower()
self.report_items((
("Sorting", self.sorting_cb.currentText().lower()),
("Annotations", annot)
))
if self.matrix is not None:
self.report_plot()
class TextList(GraphicsSimpleTextList):
def resizeEvent(self, event):
super().resizeEvent(event)
self._updateFontSize()
def _updateFontSize(self):
crect = self.contentsRect()
if self.orientation == Qt.Vertical:
h = crect.height()
else:
h = crect.width()
n = len(getattr(self, "label_items", []))
if n == 0:
return
if self.scene() is not None:
maxfontsize = self.scene().font().pointSize()
else:
maxfontsize = QApplication.instance().font().pointSize()
lineheight = max(1, h / n)
fontsize = min(self._point_size(lineheight), maxfontsize)
font = self.font()
font.setPointSize(fontsize)
self.setFont(font)
self.layout().invalidate()
self.layout().activate()
def _point_size(self, height):
font = self.font()
font.setPointSize(height)
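        # Shrink the point size until the font's line spacing fits within
        # the requested line height (but never below 1pt).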
fix = 0
while QFontMetrics(font).lineSpacing() > height and height - fix > 1:
fix += 1
font.setPointSize(height - fix)
return height - fix
##########################
# Color palette management
##########################
def palette_gradient(colors, discrete=False):
n = len(colors)
stops = numpy.linspace(0.0, 1.0, n, endpoint=True)
gradstops = [(float(stop), color) for stop, color in zip(stops, colors)]
grad = QLinearGradient(QPointF(0, 0), QPointF(1, 0))
grad.setStops(gradstops)
return grad
def palette_pixmap(colors, size):
img = QPixmap(size)
img.fill(Qt.transparent)
painter = QPainter(img)
grad = palette_gradient(colors)
grad.setCoordinateMode(QLinearGradient.ObjectBoundingMode)
painter.setPen(Qt.NoPen)
painter.setBrush(QBrush(grad))
painter.drawRect(0, 0, size.width(), size.height())
painter.end()
return img
def init_color_combo(cb, palettes, iconsize):
cb.clear()
iconsize = cb.iconSize()
for name, palette in palettes:
n, colors = max(palette.items())
colors = [QColor(*c) for c in colors]
cb.addItem(QIcon(palette_pixmap(colors, iconsize)), name,
palette)
def test(argv=sys.argv):
app = QApplication(list(argv))
argv = app.arguments()
if len(argv) > 1:
filename = argv[1]
else:
filename = "iris"
import sip
import Orange.distance
w = OWDistanceMap()
w.show()
w.raise_()
data = Orange.data.Table(filename)
dist = Orange.distance.Euclidean(data)
w.set_distances(dist)
w.handleNewSignals()
rval = app.exec_()
w.set_distances(None)
w.saveSettings()
w.onDeleteWidget()
sip.delete(w)
del w
return rval
if __name__ == "__main__":
sys.exit(test())
| kwikadi/orange3 | Orange/widgets/unsupervised/owdistancemap.py | Python | bsd-2-clause | 26,035 |
#!/usr/bin/env python
from rdflib import Graph, BNode, Literal, URIRef
from rdflib.namespace import FOAF
from flask import Flask
import flask_rdf
import random
app = Flask(__name__)
# set up a custom formatter to return turtle in text/plain to browsers
custom_formatter = flask_rdf.FormatSelector()
custom_formatter.wildcard_mimetype = 'text/plain'
custom_formatter.add_format('text/plain', 'turtle')
custom_decorator = flask_rdf.flask.Decorator(custom_formatter)
@app.route('/')
@app.route('/<path:path>')
@custom_decorator
def random_age(path=''):
graph = Graph('IOMemory', BNode())
graph.add((URIRef(path), FOAF.age, Literal(random.randint(20, 50))))
return graph
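# Illustrative check (assumes Flask's default port 5000):
#   curl -H 'Accept: text/turtle' http://localhost:5000/alice  -> turtle
# Browsers sending a wildcard Accept header get the same turtle body served
# as text/plain, courtesy of the custom formatter configured above.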
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| hufman/flask_rdf | examples/browser_default.py | Python | bsd-2-clause | 751 |
"""
Commands for X-ray Diffraction
Note that an XRD camera must be installed!
"""
def setup_epics_shutter(prefix='13MARCCD4:'):
"""
Setup Epics shutter for CCD camera
    open/close PV = 13IDA:m70.VAL (SSA H WID)
open val = 0.080, close val = -0.020
"""
caput(prefix+'cam1:ShutterOpenEPICS.OUT', '13IDA:m70.VAL')
caput(prefix+'cam1:ShutterCloseEPICS.OUT', '13IDA:m70.VAL')
caput(prefix+'cam1:ShutterOpenEPICS.OCAL', '0.080')
caput(prefix+'cam1:ShutterCloseEPICS.OCAL', '-0.020')
caput(prefix+'cam1:ShutterOpenDelay', 1.50)
caput(prefix+'cam1:ShutterCloseDelay', 0.0)
caput(prefix+'cam1:ShutterMode', 1)
#enddef
def clear_epics_shutter(prefix='13MARCCD4:'):
"""
Clear Epics shutter PV for CCD camera
"""
caput(prefix+'cam1:ShutterOpenEPICS.OUT', '')
caput(prefix+'cam1:ShutterCloseEPICS.OUT', '')
caput(prefix+'cam1:ShutterOpenEPICS.OCAL', '0')
caput(prefix+'cam1:ShutterCloseEPICS.OCAL', '0')
caput(prefix+'cam1:ShutterOpenDelay', 0.1)
caput(prefix+'cam1:ShutterCloseDelay', 0.1)
caput(prefix+'cam1:ShutterMode', 0)
#enddef
def close_ccd_shutter():
caput('13IDA:m70.VAL', -0.025, wait=True)
sleep(1.0)
#enddef
def open_ccd_shutter():
caput('13IDA:m70.VAL', 0.080, wait=True)
sleep(1.0)
#enddef
def save_xrd(name, t=10, ext=None, prefix='13PEL1:', timeout=60.0):
## prefix='13PEL1:' prefix='13MARCCD1:'
"""
Save XRD image from XRD camera.
Parameters:
name (string): name of datafile
t (float): exposure time in seconds [default= 10]
ext (int or None): number for file extension
if left as None, the extension will be auto-incremented.
        prefix (string): PV prefix for areaDetector camera ['13PEL1:']
        timeout (float): maximum time in seconds to wait
            for image to be saved [60]
Examples:
save_xrd('CeO2', t=20)
Note:
calls one of `save_xrd_marccd` or `save_xrd_pe`
See Also:
`save_xrd_marccd`, `save_xrd_pe`
"""
if 'mar' in prefix.lower():
save_xrd_marccd(name, t=t, ext=ext, prefix=prefix)
else:
save_xrd_pe(name, t=t, ext=ext, prefix=prefix)
#endif
#enddef
def save_xrd_pe(name, t=10, ext=None, prefix='13PEL1:', timeout=60.0):
"""
Save XRD image from Perkin-Elmer camera.
Parameters:
name (string): name of datafile
t (float): exposure time in seconds [default= 10]
ext (int or None): number for file extension
if left as None, the extension will be auto-incremented.
        prefix (string): PV prefix for areaDetector camera ['13PEL1:']
        timeout (float): maximum time in seconds to wait
            for image to be saved [60]
Examples:
save_xrd_pe('CeO2', t=20)
Note:
detector pool PE detector has prefix like 'dp_pe2:'
"""
# prefix='dp_pe2:'
# save shutter mode, disable shutter for now
shutter_mode = caget(prefix+'cam1:ShutterMode')
caput(prefix+'cam1:ShutterMode', 0)
sleep(0.1)
caput(prefix+'cam1:Acquire', 0)
sleep(0.1)
print("Save XRD...")
caput(prefix+'TIFF1:EnableCallbacks', 0)
caput(prefix+'TIFF1:AutoSave', 0)
caput(prefix+'TIFF1:AutoIncrement', 1)
caput(prefix+'TIFF1:FileName', name)
if ext is not None:
caput(prefix+'TIFF1:FileNumber', ext)
#endif
caput(prefix+'TIFF1:EnableCallbacks', 1)
caput(prefix+'cam1:ImageMode', 3)
sleep(0.5)
    acq_time = caget(prefix+'cam1:AcquireTime')
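    # Approximate the requested total exposure `t` with whole frames taken at
    # the camera's current per-frame acquire time.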
numimages = int(t*1.0/acq_time)
caput(prefix+'cam1:NumImages', numimages)
# expose
caput(prefix+'cam1:Acquire', 1)
sleep(0.5 + max(0.5, 0.5*t))
t0 = clock()
nrequested = caget(prefix+'cam1:NumImages')
print('Wait for Acquire ... %i' % nrequested)
while ((1 == caget(prefix+'cam1:Acquire')) and
(clock()-t0 < timeout)):
sleep(0.25)
#endwhile
print('Acquire Done, writing file %s' % name)
sleep(0.1)
# clean up, returning to short dwell time
caput(prefix+'TIFF1:WriteFile', 1)
caput(prefix+'TIFF1:EnableCallbacks', 0)
sleep(0.5)
caput(prefix+'cam1:ImageMode', 2)
caput(prefix+'cam1:ShutterMode', shutter_mode)
sleep(0.5)
caput(prefix+'cam1:Acquire', 1)
sleep(1.5)
#enddef
def save_xrd_marccd(name, t=10, ext=None, prefix='13MARCCD4:', timeout=60.0):
"""
save XRD image from MARCCD (Rayonix 165) camera to file
Parameters:
name (string): name of datafile
t (float): exposure time in seconds [default= 10]
ext (int or None): number for file extension
if left as None, the extension will be auto-incremented.
        prefix (string): PV prefix for areaDetector camera ['13MARCCD4:']
        timeout (float): maximum time in seconds to wait
            for image to be saved [60]
Examples:
save_xrd_marccd('CeO2', t=20)
Note:
The marccd requires the Epics Shutter to be set up correctly.
"""
start_time = systime()
# save shutter mode, disable shutter for now
shutter_mode = caget(prefix+'cam1:ShutterMode')
    # NOTE: acquisition needs to start with the shutter having been
    # closed for a while; the SSA H width is used as the shutter here
caput(prefix+'cam1:ShutterControl', 0)
close_ccd_shutter()
caput(prefix+'cam1:FrameType', 0)
caput(prefix+'cam1:ImageMode', 0)
caput(prefix+'cam1:AutoSave', 1)
caput(prefix+'cam1:AutoIncrement', 1)
caput(prefix+'cam1:FileName', name)
if ext is not None:
caput(prefix+'cam1:FileNumber', ext)
#endif
caput(prefix+'cam1:AcquireTime', t)
sleep(0.1)
# expose
caput(prefix+'cam1:Acquire', 1)
sleep(1.0 + max(1.0, t))
t0 = systime()
print('Wait for Acquire ... ')
while ((1 == caget(prefix+'cam1:Acquire')) and
           (systime()-t0 < timeout)):
sleep(0.25)
#endwhile
fname = caget(prefix+'cam1:FullFileName_RBV', as_string=True)
print('Acquire Done! %.3f sec' % (systime()-start_time))
print('Wrote %s' % fname)
sleep(1.0)
caput(prefix+'cam1:ShutterControl', 1)
#enddef
def xrd_at(posname, t):
move_samplestage(posname, wait=True)
save_xrd(posname, t=t, ext=1)
#enddef
def xrd_bgr_marccd(prefix='13MARCCD4:', timeout=120.0):
"""
collect XRD Background for marccd
Parameters:
        prefix (string): PV prefix for camera ['13MARCCD4:']
timeout (float): maximum time to wait [120]
"""
caput(prefix+'cam1:ShutterControl', 0)
caput(prefix+'cam1:FrameType', 1)
sleep(0.1)
caput(prefix+'cam1:Acquire', 1)
sleep(3)
t0 = clock()
print('Wait for Acquire ... ')
while ((1 == caget(prefix+'cam1:Acquire')) and
(clock()-t0 < timeout)):
sleep(0.25)
#endwhile
sleep(2.0)
#enddef
def xrd_bgr(prefix='13PEL1:'):
"""
collect XRD Background for Perkin Elmer
Parameters:
        prefix (string): PV prefix for camera ['13PEL1:']
"""
caput(prefix+'cam1:ShutterMode', 1)
immode = caget(prefix+'cam1:ImageMode')
caput(prefix+'cam1:ImageMode', 1)
caput(prefix+'cam1:ShutterControl', 0)
sleep(3)
caput(prefix+'cam1:PEAcquireOffset', 1)
sleep(5)
caput(prefix+'cam1:ShutterControl', 1)
caput(prefix+'cam1:ImageMode', immode)
caput(prefix+'cam1:Acquire', 1)
sleep(2.0)
#enddef
| newville/microprobe_docs | doc/macros/xrd.py | Python | bsd-2-clause | 7,656 |
# test_queue.py
# Created on: Dec 2, 2015
# Author: pchero
import os
import sys
import test_common
def main():
ast = test_common.Ami()
ast.username = sys.argv[1]
ast.password = sys.argv[2]
if ast.conn() == False:
print("Could not connect.")
return 1
    # create queue
ret = ast.sendCmd("OutQueueCreate", Name="TestDlma", Detail="TestDetail")
res = ret
if res[0]["Response"] != "Success":
print("Couldn not pass the test_queue. res[%s]" % res)
raise "test_queue"
for i in range(10):
evt = ast.recvEvt()
if evt["Event"] == "OutQueueCreate":
break
if evt["Name"] != "TestDlma" or evt["Detail"] != "TestDetail":
print("Couldn not pass the test_queue. ret[%s]" % evt)
raise "test_queue"
test_uuid = evt["Uuid"]
    # get queue
ret = ast.sendCmd("OutQueueShow", Uuid=test_uuid)
flg = False
for i in range(len(ret)):
msg = ret[i]
if "Uuid" not in msg:
continue
if msg["Uuid"] == test_uuid:
flg = True
break
if flg == False:
print("Couldn not pass the test_queue. ret[%s]" % ret)
raise "test_queue"
    # delete queue
ret = ast.sendCmd("OutQueueDelete", Uuid=test_uuid)
if ret[0]["Response"] != "Success":
print("Couldn not pass the test_queue. ret[%s]" % ret)
raise "test_queue"
for i in range(10):
ret = ast.recvEvt()
if ret["Event"] == "OutQueueDelete":
break
if ret["Uuid"] != test_uuid:
print("Could not pass the test_queue. ret[%s]" % ret)
raise "test_queue"
    # verify the queue was deleted
ret = ast.sendCmd("OutQueueShow", Uuid=test_uuid)
for i in range(len(ret)):
msg = ret[i]
if "Uuid" not in msg:
continue
if msg["Uuid"] == test_uuid:
print("Could not pass the test_queue. ret[%s], uuid[%s]" % (ret, test_uuid))
raise "test_queue"
return 0
if __name__ == '__main__':
main()
| pchero/asterisk-outbound | test/test_queue.py | Python | bsd-2-clause | 2,084 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_title01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with default title."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [46165376, 54462720]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5',
'name': 'Foo'})
chart.set_title({'none': True})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_title01.py | Python | bsd-2-clause | 1,535 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-06 02:47
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('car_model', models.CharField(max_length=20)),
('color', models.CharField(max_length=20)),
('year', models.SmallIntegerField(help_text='Use year as YYYY.', validators=[django.core.validators.RegexValidator('^[0-9]{4}$', 'Year in invalid format!', 'invalid')])),
('mileage', models.IntegerField(default=0, help_text='Or your car is brand new or it have some mileage traveled', validators=[django.core.validators.MinValueValidator(0)])),
],
),
migrations.CreateModel(
name='OilChange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(verbose_name='date changed')),
('mileage', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='car.Car')),
],
),
migrations.CreateModel(
name='Refuel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(verbose_name='date refueled')),
('liters', models.DecimalField(decimal_places=3, max_digits=7)),
('fuel_price', models.DecimalField(decimal_places=2, max_digits=4)),
('mileage', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('fuel_type', models.CharField(choices=[('Regular gas', 'Regular gas'), ('Premium gas', 'Premium gas'), ('Alcohol', 'Alcohol'), ('Diesel', 'Diesel')], default='Regular gas', max_length=20)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='car.Car')),
],
),
]
| italopaiva/your.car | yourcar/car/migrations/0001_initial.py | Python | bsd-2-clause | 2,468 |
"""Widget for creating classes from non-numeric attribute by substrings"""
import re
from itertools import count
import numpy as np
from AnyQt.QtWidgets import QGridLayout, QLabel, QLineEdit, QSizePolicy
from AnyQt.QtCore import QSize, Qt
from Orange.data import StringVariable, DiscreteVariable, Domain
from Orange.data.table import Table
from Orange.statistics.util import bincount
from Orange.preprocess.transformation import Transformation, Lookup
from Orange.widgets import gui, widget
from Orange.widgets.settings import DomainContextHandler, ContextSetting
from Orange.widgets.utils.itemmodels import DomainModel
from Orange.widgets.widget import Msg
def map_by_substring(a, patterns, case_sensitive, match_beginning):
"""
    Map values in `a` using a list of patterns. The patterns are considered in
    order of appearance.
    Args:
        a (np.array): input array of `dtype` `str`
        patterns (list of str): list of strings
case_sensitive (bool): case sensitive match
match_beginning (bool): match only at the beginning of the string
Returns:
np.array of floats representing indices of matched patterns
"""
res = np.full(len(a), np.nan)
if not case_sensitive:
a = np.char.lower(a)
patterns = (pattern.lower() for pattern in patterns)
for val_idx, pattern in reversed(list(enumerate(patterns))):
indices = np.char.find(a, pattern)
matches = indices == 0 if match_beginning else indices != -1
res[matches] = val_idx
return res
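# Illustrative sketch (not part of the original module): patterns are applied
# in reverse so earlier patterns take precedence and the empty pattern acts as
# a catch-all. This mirrors the worked example in the class docstrings below.
def _map_by_substring_example():
    strings = np.array(["abcd", "aa", "bcd", "rabc", "x"])
    res = map_by_substring(strings, ["abc", "a", "bc", ""],
                           case_sensitive=False, match_beginning=False)
    assert list(res) == [0, 1, 2, 0, 3]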
class ValueFromStringSubstring(Transformation):
"""
Transformation that computes a discrete variable from a string variable by
pattern matching.
    Given patterns `["abc", "a", "bc", ""]`, string data
    `["abcd", "aa", "bcd", "rabc", "x"]` is transformed to values of the new
    attribute with indices `[0, 1, 2, 0, 3]`.
Args:
variable (:obj:`~Orange.data.StringVariable`): the original variable
patterns (list of str): list of string patterns
case_sensitive (bool, optional): if set to `True`, the match is case
sensitive
match_beginning (bool, optional): if set to `True`, the pattern must
appear at the beginning of the string
"""
def __init__(self, variable, patterns,
case_sensitive=False, match_beginning=False):
super().__init__(variable)
self.patterns = patterns
self.case_sensitive = case_sensitive
self.match_beginning = match_beginning
def transform(self, c):
"""
Transform the given data.
Args:
c (np.array): an array of type that can be cast to dtype `str`
Returns:
np.array of floats representing indices of matched patterns
"""
nans = np.equal(c, None)
c = c.astype(str)
c[nans] = ""
res = map_by_substring(
c, self.patterns, self.case_sensitive, self.match_beginning)
res[nans] = np.nan
return res
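# Minimal usage sketch (an assumption, not original code): None entries in the
# source column come back as NaN, everything else as a pattern index.
def _value_from_string_example():
    transform = ValueFromStringSubstring(StringVariable("s"), ["ab", ""])
    res = transform.transform(np.array(["abc", None, "xy"], dtype=object))
    assert res[0] == 0 and np.isnan(res[1]) and res[2] == 1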
class ValueFromDiscreteSubstring(Lookup):
"""
    Transformation that computes a discrete variable from a discrete variable
    by pattern matching.
    Say that the original attribute has values
    `["abcd", "aa", "bcd", "rabc", "x"]`. Given patterns
    `["abc", "a", "bc", ""]`, the values are mapped to the values of the new
    attribute with indices `[0, 1, 2, 0, 3]`.
Args:
variable (:obj:`~Orange.data.DiscreteVariable`): the original variable
patterns (list of str): list of string patterns
case_sensitive (bool, optional): if set to `True`, the match is case
sensitive
match_beginning (bool, optional): if set to `True`, the pattern must
appear at the beginning of the string
"""
def __init__(self, variable, patterns,
case_sensitive=False, match_beginning=False):
super().__init__(variable, [])
self.case_sensitive = case_sensitive
self.match_beginning = match_beginning
self.patterns = patterns # Finally triggers computation of the lookup
def __setattr__(self, key, value):
"""__setattr__ is overloaded to recompute the lookup table when the
patterns, the original attribute or the flags change."""
super().__setattr__(key, value)
if hasattr(self, "patterns") and \
key in ("case_sensitive", "match_beginning", "patterns",
"variable"):
self.lookup_table = map_by_substring(
self.variable.values, self.patterns,
self.case_sensitive, self.match_beginning)
class OWCreateClass(widget.OWWidget):
name = "Create Class"
description = "Create class attribute from a string attribute"
icon = "icons/CreateClass.svg"
category = "Data"
keywords = ["data"]
inputs = [("Data", Table, "set_data")]
outputs = [("Data", Table)]
want_main_area = False
settingsHandler = DomainContextHandler()
attribute = ContextSetting(None)
class_name = ContextSetting("class")
rules = ContextSetting({})
match_beginning = ContextSetting(False)
case_sensitive = ContextSetting(False)
TRANSFORMERS = {StringVariable: ValueFromStringSubstring,
DiscreteVariable: ValueFromDiscreteSubstring}
class Warning(widget.OWWidget.Warning):
no_nonnumeric_vars = Msg("Data contains only numeric variables.")
def __init__(self):
super().__init__()
self.data = None
# The following lists are of the same length as self.active_rules
#: list of pairs with counts of matches for each patter when the
# patterns are applied in order and when applied on the entire set,
# disregarding the preceding patterns
self.match_counts = []
#: list of list of QLineEdit: line edit pairs for each pattern
self.line_edits = []
#: list of QPushButton: list of remove buttons
self.remove_buttons = []
#: list of list of QLabel: pairs of labels with counts
self.counts = []
combo = gui.comboBox(
self.controlArea, self, "attribute", label="From column: ",
box=True, orientation=Qt.Horizontal, callback=self.update_rules,
model=DomainModel(valid_types=(StringVariable, DiscreteVariable)))
# Don't use setSizePolicy keyword argument here: it applies to box,
# not the combo
combo.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
patternbox = gui.vBox(self.controlArea, box=True)
#: QWidget: the box that contains the remove buttons, line edits and
# count labels. The lines are added and removed dynamically.
self.rules_box = rules_box = QGridLayout()
patternbox.layout().addLayout(self.rules_box)
box = gui.hBox(patternbox)
gui.button(
box, self, "+", callback=self.add_row, autoDefault=False, flat=True,
minimumSize=(QSize(20, 20)))
gui.rubber(box)
self.rules_box.setColumnMinimumWidth(1, 70)
self.rules_box.setColumnMinimumWidth(0, 10)
self.rules_box.setColumnStretch(0, 1)
self.rules_box.setColumnStretch(1, 1)
self.rules_box.setColumnStretch(2, 100)
rules_box.addWidget(QLabel("Name"), 0, 1)
rules_box.addWidget(QLabel("Substring"), 0, 2)
rules_box.addWidget(QLabel("#Instances"), 0, 3, 1, 2)
self.update_rules()
gui.lineEdit(
self.controlArea, self, "class_name",
label="Name for the new class:",
box=True, orientation=Qt.Horizontal)
optionsbox = gui.vBox(self.controlArea, box=True)
gui.checkBox(
optionsbox, self, "match_beginning", "Match only at the beginning",
callback=self.options_changed)
gui.checkBox(
optionsbox, self, "case_sensitive", "Case sensitive",
callback=self.options_changed)
layout = QGridLayout()
gui.widgetBox(self.controlArea, orientation=layout)
for i in range(3):
layout.setColumnStretch(i, 1)
layout.addWidget(self.report_button, 0, 0)
apply = gui.button(None, self, "Apply", autoDefault=False,
callback=self.apply)
layout.addWidget(apply, 0, 2)
# TODO: Resizing upon changing the number of rules does not work
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
@property
def active_rules(self):
"""
Returns the class names and patterns corresponding to the currently
selected attribute. If the attribute is not yet in the dictionary,
set the default.
"""
return self.rules.setdefault(self.attribute and self.attribute.name,
[["", ""], ["", ""]])
def rules_to_edits(self):
"""Fill the line edites with the rules from the current settings."""
for editr, textr in zip(self.line_edits, self.active_rules):
for edit, text in zip(editr, textr):
edit.setText(text)
def set_data(self, data):
"""Input data signal handler."""
self.closeContext()
self.rules = {}
self.data = data
model = self.controls.attribute.model()
model.set_domain(data and data.domain)
self.Warning.no_nonnumeric_vars(shown=data is not None and not model)
if not model:
self.attribute = None
self.send("Data", None)
return
self.attribute = model[0]
self.openContext(data)
self.update_rules()
self.apply()
def update_rules(self):
"""Called when the rules are changed: adjust the number of lines in
the form and fill them, update the counts. The widget does not have
auto-apply."""
self.adjust_n_rule_rows()
self.rules_to_edits()
self.update_counts()
# TODO: Indicator that changes need to be applied
def options_changed(self):
self.update_counts()
def adjust_n_rule_rows(self):
"""Add or remove lines if needed and fix the tab order."""
def _add_line():
self.line_edits.append([])
n_lines = len(self.line_edits)
for coli in range(1, 3):
edit = QLineEdit()
self.line_edits[-1].append(edit)
self.rules_box.addWidget(edit, n_lines, coli)
edit.textChanged.connect(self.sync_edit)
button = gui.button(
None, self, label='×', flat=True, height=20,
styleSheet='* {font-size: 16pt; color: silver}'
'*:hover {color: black}',
autoDefault=False, callback=self.remove_row)
button.setMinimumSize(QSize(12, 20))
self.remove_buttons.append(button)
self.rules_box.addWidget(button, n_lines, 0)
self.counts.append([])
for coli, kwargs in enumerate(
(dict(alignment=Qt.AlignRight),
dict(alignment=Qt.AlignLeft, styleSheet="color: gray"))):
label = QLabel(**kwargs)
self.counts[-1].append(label)
self.rules_box.addWidget(label, n_lines, 3 + coli)
def _remove_line():
for edit in self.line_edits.pop():
edit.deleteLater()
self.remove_buttons.pop().deleteLater()
for label in self.counts.pop():
label.deleteLater()
def _fix_tab_order():
prev = None
for row, rule in zip(self.line_edits, self.active_rules):
for col_idx, edit in enumerate(row):
edit.row, edit.col_idx = rule, col_idx
if prev is not None:
self.setTabOrder(prev, edit)
prev = edit
n = len(self.active_rules)
while n > len(self.line_edits):
_add_line()
while len(self.line_edits) > n:
_remove_line()
_fix_tab_order()
def add_row(self):
"""Append a new row at the end."""
self.active_rules.append(["", ""])
self.adjust_n_rule_rows()
self.update_counts()
def remove_row(self):
"""Remove a row."""
remove_idx = self.remove_buttons.index(self.sender())
del self.active_rules[remove_idx]
self.update_rules()
self.update_counts()
def sync_edit(self, text):
"""Handle changes in line edits: update the active rules and counts"""
edit = self.sender()
edit.row[edit.col_idx] = text
self.update_counts()
def class_labels(self):
"""Construct a list of class labels. Empty labels are replaced with
C1, C2, C3. If C<n> already appears in the list of values given by
the user, the labels start at C<n+1> instead.
"""
largest_c = max((int(label[1:]) for label, _ in self.active_rules
if re.match("^C\\d+", label)),
default=0)
class_count = count(largest_c + 1)
return [label_edit.text() or "C{}".format(next(class_count))
for label_edit, _ in self.line_edits]
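    # e.g. with user-typed labels ["", "C2", ""] the two blanks become "C3"
    # and "C4": numbering starts above the largest explicit C<n>.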
def update_counts(self):
"""Recompute and update the counts of matches."""
def _matcher(strings, pattern):
"""Return indices of strings into patterns; consider case
sensitivity and matching at the beginning. The given strings are
assumed to be in lower case if match is case insensitive. Patterns
are fixed on the fly."""
if not self.case_sensitive:
pattern = pattern.lower()
indices = np.char.find(strings, pattern.strip())
return indices == 0 if self.match_beginning else indices != -1
def _lower_if_needed(strings):
return strings if self.case_sensitive else np.char.lower(strings)
def _string_counts():
"""
Generate pairs of arrays for each rule until running out of data
instances. np.sum over the two arrays in each pair gives the
number of matches of the remaining instances (considering the
order of patterns) and of the original data.
For _string_counts, the arrays contain bool masks referring to the
original data
"""
nonlocal data
data = data.astype(str)
data = data[~np.char.equal(data, "")]
data = _lower_if_needed(data)
remaining = np.array(data)
for _, pattern in self.active_rules:
matching = _matcher(remaining, pattern)
total_matching = _matcher(data, pattern)
yield matching, total_matching
remaining = remaining[~matching]
if len(remaining) == 0:
break
def _discrete_counts():
"""
Generate pairs similar to _string_counts, except that the arrays
contain bin counts for the attribute's values matching the pattern.
"""
attr_vals = np.array(attr.values)
attr_vals = _lower_if_needed(attr_vals)
bins = bincount(data, max_val=len(attr.values) - 1)[0]
remaining = np.array(bins)
for _, pattern in self.active_rules:
matching = _matcher(attr_vals, pattern)
yield remaining[matching], bins[matching]
remaining[matching] = 0
if not np.any(remaining):
break
def _clear_labels():
"""Clear all labels"""
for lab_matched, lab_total in self.counts:
lab_matched.setText("")
lab_total.setText("")
def _set_labels():
"""Set the labels to show the counts"""
for (n_matched, n_total), (lab_matched, lab_total), (lab, patt) in \
zip(self.match_counts, self.counts, self.active_rules):
n_before = n_total - n_matched
lab_matched.setText("{}".format(n_matched))
if n_before and (lab or patt):
lab_total.setText("+ {}".format(n_before))
if n_matched:
tip = "{} of the {} matching instances are already " \
"covered above".format(n_before, n_total)
else:
tip = "All matching instances are already covered above"
lab_total.setToolTip(tip)
lab_matched.setToolTip(tip)
def _set_placeholders():
"""Set placeholders for empty edit lines"""
matches = [n for n, _ in self.match_counts] + \
[0] * len(self.line_edits)
for n_matched, (_, patt) in zip(matches, self.line_edits):
if not patt.text():
patt.setPlaceholderText(
"(remaining instances)" if n_matched else "(unused)")
labels = self.class_labels()
for label, (lab_edit, _) in zip(labels, self.line_edits):
if not lab_edit.text():
lab_edit.setPlaceholderText(label)
_clear_labels()
attr = self.attribute
if attr is None:
return
counters = {StringVariable: _string_counts,
DiscreteVariable: _discrete_counts}
data = self.data.get_column_view(attr)[0]
self.match_counts = [[int(np.sum(x)) for x in matches]
for matches in counters[type(attr)]()]
_set_labels()
_set_placeholders()
def apply(self):
"""Output the transformed data."""
if not self.attribute:
self.send("Data", None)
return
domain = self.data.domain
rules = self.active_rules
# Transposition + stripping
valid_rules = [label or pattern or n_matches
for (label, pattern), n_matches in
zip(rules, self.match_counts)]
patterns = [pattern
for (_, pattern), valid in zip(rules, valid_rules)
if valid]
names = [name for name, valid in zip(self.class_labels(), valid_rules)
if valid]
transformer = self.TRANSFORMERS[type(self.attribute)]
compute_value = transformer(
self.attribute, patterns, self.case_sensitive, self.match_beginning)
new_class = DiscreteVariable(
self.class_name, names, compute_value=compute_value)
new_domain = Domain(
domain.attributes, new_class, domain.metas + domain.class_vars)
new_data = Table(new_domain, self.data)
self.send("Data", new_data)
def send_report(self):
def _cond_part():
rule = "<b>{}</b> ".format(class_name)
if patt:
rule += "if <b>{}</b> contains <b>{}</b>".format(
self.attribute.name, patt)
else:
rule += "otherwise"
return rule
def _count_part():
if not n_matched:
return "all {} matching instances are already covered " \
"above".format(n_total)
elif n_matched < n_total and patt:
return "{} matching instances (+ {} that are already " \
"covered above".format(n_matched, n_total - n_matched)
else:
return "{} matching instances".format(n_matched)
if not self.attribute:
return
self.report_items("Input", [("Source attribute", self.attribute.name)])
output = ""
names = self.class_labels()
for (n_matched, n_total), class_name, (lab, patt) in \
zip(self.match_counts, names, self.active_rules):
if lab or patt or n_total:
output += "<li>{}; {}</li>".format(_cond_part(), _count_part())
if output:
self.report_items("Output", [("Class name", self.class_name)])
self.report_raw("<ol>{}</ol>".format(output))
def main(): # pragma: no cover
"""Simple test for manual inspection of the widget"""
import sys
from AnyQt.QtWidgets import QApplication
a = QApplication(sys.argv)
table = Table("zoo")
ow = OWCreateClass()
ow.show()
ow.set_data(table)
a.exec()
ow.saveSettings()
if __name__ == "__main__": # pragma: no cover
main()
| cheral/orange3 | Orange/widgets/data/owcreateclass.py | Python | bsd-2-clause | 20,686 |
"""
This is a subfile for IsyClass.py
These functions are accessible via the Isy class object
"""
__author__ = 'Peter Shipley <[email protected]>'
__copyright__ = "Copyright (C) 2013 Peter Shipley"
__license__ = "BSD"
import time
##
## Climate functions
##
def load_clim(self):
""" Load climate data from ISY device
args: none
internal function call
"""
if self.debug & 0x01:
print("load_clim")
clim_tree = self._getXMLetree("/rest/climate")
self.climateinfo = dict()
if clim_tree is None:
return
# Isy._printXML(self.climateinfo)
for cl in clim_tree.iter("climate"):
for k, v in cl.items():
self.climateinfo[k] = v
for ce in list(cl):
self.climateinfo[ce.tag] = ce.text
self.climateinfo["time"] = time.gmtime()
def clim_get_val(self, prop):
pass
def clim_query(self):
""" returns dictionary of climate info """
if not self.climateinfo:
self.load_clim()
#
# ADD CODE to check self.cachetime
#
return self.climateinfo
def clim_iter(self):
""" Iterate though climate values
args:
None
returns:
Return an iterator over the climate values
"""
if not self.climateinfo:
self.load_clim()
k = self.climateinfo.keys()
for p in k:
yield self.climateinfo[p]
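# Illustrative usage (assumes these functions have been attached to an Isy
# instance `isy`, as the module header describes):
#     isy.load_clim()
#     for val in isy.clim_iter():
#         print(val)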
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
import __main__
print(__main__.__file__)
print("syntax ok")
exit(0)
| fxstein/ISYlib-python | ISY/_isyclimate.py | Python | bsd-2-clause | 1,533 |
#!/usr/bin/env python3
from zested.main import main
if __name__ == "__main__":
main()
| Luthaf/Zested | Zested.py | Python | bsd-2-clause | 91 |
from __future__ import print_function
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import HDSStream
from streamlink.stream import HLSStream
class TF1(Plugin):
url_re = re.compile(r"https?://(?:www\.)?(?:tf1\.fr/(\w+)/direct|(lci).fr/direct)/?")
embed_url = "http://www.wat.tv/embedframe/live{0}"
embed_re = re.compile(r"urlLive.*?:.*?\"(http.*?)\"", re.MULTILINE)
api_url = "http://www.wat.tv/get/{0}/591997"
swf_url = "http://www.wat.tv/images/v70/PlayerLite.swf"
hds_channel_remap = {"tf1": "androidliveconnect", "lci": "androidlivelci"}
hls_channel_remap = {"lci": "LCI", "tf1": "V4"}
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_hds_streams(self, channel):
channel = self.hds_channel_remap.get(channel, "{0}live".format(channel))
manifest_url = http.get(self.api_url.format(channel),
params={"getURL": 1},
headers={"User-Agent": useragents.FIREFOX}).text
for s in HDSStream.parse_manifest(self.session,
manifest_url,
pvswf=self.swf_url,
headers={"User-Agent": useragents.FIREFOX}).items():
yield s
def _get_hls_streams(self, channel):
channel = self.hls_channel_remap.get(channel, channel)
embed_url = self.embed_url.format(channel)
self.logger.debug("Found embed URL: {0}", embed_url)
# page needs to have a mobile user agent
embed_page = http.get(embed_url, headers={"User-Agent": useragents.ANDROID})
m = self.embed_re.search(embed_page.text)
if m:
hls_stream_url = m.group(1)
try:
for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
yield s
except Exception:
self.logger.error("Failed to load the HLS playlist for {0}", channel)
def _get_streams(self):
m = self.url_re.match(self.url)
if m:
channel = m.group(1) or m.group(2)
self.logger.debug("Found channel {0}", channel)
for s in self._get_hds_streams(channel):
yield s
for s in self._get_hls_streams(channel):
yield s
__plugin__ = TF1
| mmetak/streamlink | src/streamlink/plugins/tf1.py | Python | bsd-2-clause | 2,485 |
import bcrypt
def hash_password(password):
default_rounds = 14
bcrypt_salt = bcrypt.gensalt(default_rounds)
hashed_password = bcrypt.hashpw(password, bcrypt_salt)
return hashed_password
def check_password(password, hashed):
return bcrypt.checkpw(password, hashed)
| fdemian/Morpheus | api/Crypto.py | Python | bsd-2-clause | 288 |
# -*- coding: utf-8 -*-
import os
import requests
import time
import math
import datetime
import random
import envoy
import jsonfield
import logging
import urllib
from collections import defaultdict
from magic_repr import make_repr
from hashlib import md5, sha1
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, UserManager as BaseUserManager
from django.core.cache import cache
#from south.modelsinspector import add_introspection_rules
from twiggy_goodies.threading import log
from allmychanges.validators import URLValidator
from allmychanges.downloaders.utils import normalize_url
from allmychanges.issues import calculate_issue_importance
from allmychanges.utils import (
split_filenames,
parse_search_list,
get_one_or_none,
)
from allmychanges import chat
from allmychanges.downloaders import (
get_downloader)
from allmychanges.utils import reverse
from allmychanges.tasks import (
update_preview_task,
update_changelog_task)
from allmychanges.exceptions import SynonymError
MARKUP_CHOICES = (
('markdown', 'markdown'),
('rest', 'rest'),
)
NAME_LENGTH = 80
NAMESPACE_LENGTH = 80
DESCRIPTION_LENGTH = 255
PROCESSING_STATUS_LENGTH = 40
# based on http://www.caktusgroup.com/blog/2013/08/07/migrating-custom-user-model-django/
from pytz import common_timezones
TIMEZONE_CHOICES = [(tz, tz) for tz in common_timezones]
class URLField(models.URLField):
default_validators = [URLValidator()]
#add_introspection_rules([], ["^allmychanges\.models\.URLField"])
class UserManager(BaseUserManager):
def _create_user(self, username, email=None, password=None,
**extra_fields):
now = timezone.now()
email = self.normalize_email(email)
user = self.model(username=username,
email=email,
last_login=now,
date_joined=now,
**extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create(self, *args, **kwargs):
email = kwargs.get('email')
if email and self.filter(email=email).count() > 0:
raise ValueError('User with email "{0}" already exists'.format(email))
username = kwargs.get('username')
url = settings.BASE_URL + reverse('admin-user-profile',
username=username)
chat.send(('New user <{url}|{username}> '
'with email "{email}" (from create)').format(
url=url,
username=username,
email=email))
return super(UserManager, self).create(*args, **kwargs)
def create_user(self, username, email=None, password=None, **extra_fields):
if email and self.filter(email=email).count() > 0:
raise ValueError('User with email "{0}" already exists'.format(email))
url = settings.BASE_URL + reverse('admin-user-profile',
username=username)
chat.send(('New user <{url}|{username}> '
'with email "{email}" (from create_user)').format(
url=url,
username=username,
email=email))
return self._create_user(username, email, password,
**extra_fields)
def active_users(self, interval):
"""Outputs only users who was active in last `interval` days.
"""
after = timezone.now() - datetime.timedelta(interval)
queryset = self.all()
queryset = queryset.filter(history_log__action__in=ACTIVE_USER_ACTIONS,
history_log__created_at__gte=after).distinct()
return queryset
SEND_DIGEST_CHOICES = (
('daily', 'Every day'),
('weekly', 'Every week (on Monday)'),
('never', 'Never'))
RSS_HASH_LENGH = 32
class User(AbstractBaseUser):
"""
A fully featured User model with admin-compliant permissions that uses
a full-length email field as the username.
Email and password are required. Other fields are optional.
"""
username = models.CharField('user name', max_length=254, unique=True)
email = models.EmailField('email address', max_length=254)
email_is_valid = models.BooleanField(default=False)
date_joined = models.DateTimeField('date joined', default=timezone.now)
timezone = models.CharField(max_length=100,
choices=TIMEZONE_CHOICES,
default='UTC')
changelogs = models.ManyToManyField('Changelog',
through='ChangelogTrack',
related_name='trackers')
feed_versions = models.ManyToManyField('Version',
through='FeedItem',
related_name='users')
feed_sent_id = models.IntegerField(
default=0,
help_text='Keeps position in feed items already sent in digest emails')
last_digest_sent_at = models.DateTimeField(
blank=True,
null=True,
help_text='Date when last email digest was sent')
skips_changelogs = models.ManyToManyField('Changelog',
through='ChangelogSkip',
related_name='skipped_by')
moderated_changelogs = models.ManyToManyField('Changelog',
through='Moderator',
related_name='moderators')
# notification settings
send_digest = models.CharField(max_length=100,
choices=SEND_DIGEST_CHOICES,
default='daily')
slack_url = models.URLField(max_length=2000,
default='',
blank=True)
webhook_url = models.URLField(max_length=2000,
default='',
blank=True)
rss_hash = models.CharField(max_length=RSS_HASH_LENGH,
unique=True,
blank=True,
null=True)
custom_fields = jsonfield.JSONField(
default={},
help_text='Custom fields such like "Location" or "SecondEmail".',
blank=True)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = 'user'
verbose_name_plural = 'users'
__repr__ = make_repr('username', 'email')
def get_avatar(self, size):
# adorable_template = 'https://api.adorable.io/avatars/{size}/{hash}.png'
robohash_template = 'https://robohash.org/{hash}.png?size={size}x{size}'
if self.email:
hash = md5(self.email.lower()).hexdigest()
default = robohash_template.format(size=size, hash=hash)
avatar_url = 'https://www.gravatar.com/avatar/{hash}?{opts}'.format(
hash=hash,
opts=urllib.urlencode(
dict(
s=str(size),
d=default
)
)
)
else:
hash = md5(self.username).hexdigest()
avatar_url = robohash_template.format(size=size, hash=hash)
return avatar_url
@property
def is_superuser(self):
return self.username in settings.SUPERUSERS
def does_track(self, changelog):
"""Check if this user tracks given changelog."""
return self.changelogs.filter(pk=changelog.id).exists()
def track(self, changelog):
if not self.does_track(changelog):
if changelog.namespace == 'web' and changelog.name == 'allmychanges':
action = 'track-allmychanges'
action_description = 'User tracked our project\'s changelog.'
else:
action = 'track'
action_description = 'User tracked changelog:{0}'.format(changelog.id)
UserHistoryLog.write(self, '', action, action_description)
ChangelogTrack.objects.create(
user=self,
changelog=changelog)
def untrack(self, changelog):
if self.does_track(changelog):
if changelog.namespace == 'web' and changelog.name == 'allmychanges':
action = 'untrack-allmychanges'
action_description = 'User untracked our project\'s changelog.'
else:
action = 'untrack'
action_description = 'User untracked changelog:{0}'.format(changelog.id)
UserHistoryLog.write(self, '', action, action_description)
ChangelogTrack.objects.filter(
user=self,
changelog=changelog).delete()
def does_skip(self, changelog):
"""Check if this user skipped this changelog in package selector."""
return self.skips_changelogs.filter(pk=changelog.id).exists()
def skip(self, changelog):
if not self.does_skip(changelog):
action = 'skip'
action_description = 'User skipped changelog:{0}'.format(changelog.id)
UserHistoryLog.write(self, '', action, action_description)
ChangelogSkip.objects.create(
user=self,
changelog=changelog)
def add_feed_item(self, version):
if self.send_digest == 'never':
return None
return FeedItem.objects.create(user=self, version=version)
def save(self, *args, **kwargs):
if self.rss_hash is None:
self.rss_hash = sha1(self.username + settings.SECRET_KEY).hexdigest()[:RSS_HASH_LENGH]
return super(User, self).save(*args, **kwargs)
class Subscription(models.Model):
email = models.EmailField()
come_from = models.CharField(max_length=100)
date_created = models.DateTimeField()
def __unicode__(self):
return self.email
class Downloadable(object):
"""Adds method download, which uses attribute `source`
to update attribute `downloader` if needed and then to
    download the repository into a temporary directory.
"""
def download(self, downloader,
report_back=lambda message, level=logging.INFO: None):
"""This method fetches repository into a temporary directory
and returns path to this directory.
It can report about downloading status using callback `report_back`.
Everything what will passed to `report_back`, will be displayed to
the end user in a processing log on a "Tune" page.
"""
if isinstance(downloader, dict):
params = downloader.get('params', {})
downloader = downloader['name']
else:
params = {}
params.update(self.downloader_settings or {})
download = get_downloader(downloader)
return download(self.source,
report_back=report_back,
**params)
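# A minimal sketch (an assumption, not part of the original codebase) of a
# `report_back` callback suitable for Downloadable.download: it forwards
# downloader progress messages into the standard logging module.
def _log_report_back(message, level=logging.INFO):
    logging.getLogger('allmychanges.downloader').log(level, message)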
# A mixin to get/set ignore and check lists on a model.
def get_ignore_list(self):
"""Returns a list with all filenames and directories to ignore
when searching a changelog."""
return split_filenames(self.ignore_list)
def set_ignore_list(self, items):
self.ignore_list = u'\n'.join(items)
def get_search_list(self):
"""Returns a list with all filenames and directories to check
when searching a changelog."""
return parse_search_list(self.search_list)
def set_search_list(self, items):
def process(item):
if isinstance(item, tuple) and item[1]:
return u':'.join(item)
else:
return item
self.search_list = u'\n'.join(map(process, items))
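# Round-trip sketch (assuming parse_search_list understands the same
# "filename:parser" form that set_search_list emits):
#     set_search_list(changelog, [("CHANGES.rst", "markdown"), "docs/"])
#     # -> changelog.search_list == "CHANGES.rst:markdown\ndocs/"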
class ChangelogManager(models.Manager):
def only_active(self):
# active changelog is good and not paused
queryset = self.good()
return queryset.filter(paused_at=None)
def good(self):
# good changelog should have namespace, name, source and downloader
return self.all().exclude(
Q(name=None) |
Q(namespace=None) |
Q(downloader=None) |
Q(source=''))
def unsuccessful(self):
return self.all().filter(
Q(name=None) |
Q(namespace=None) |
Q(downloader=None) |
Q(source=''))
class Changelog(Downloadable, models.Model):
objects = ChangelogManager()
source = URLField(db_index=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
# TODO: remove
processing_started_at = models.DateTimeField(blank=True, null=True)
problem = models.CharField(max_length=1000,
help_text='Latest error message',
blank=True, null=True)
# TODO: remove
filename = models.CharField(max_length=1000,
help_text=('If changelog was discovered, then '
'field will store it\'s filename'),
blank=True, null=True)
updated_at = models.DateTimeField(blank=True, null=True)
next_update_at = models.DateTimeField(default=timezone.now)
paused_at = models.DateTimeField(blank=True, null=True)
last_update_took = models.IntegerField(
help_text=('Number of seconds required to '
'update this changelog last time'),
default=0)
ignore_list = models.CharField(max_length=1000,
default='',
help_text=('Comma-separated list of directories'
' and filenames to ignore searching'
' changelog.'),
blank=True)
    # TODO: figure out why there are two fields here: check_list and search_list
check_list = models.CharField(max_length=1000,
default='',
help_text=('Comma-separated list of directories'
' and filenames to search'
' changelog.'),
blank=True)
search_list = models.CharField(max_length=1000,
default='',
help_text=('Comma-separated list of directories'
' and filenames to search'
' changelog.'),
blank=True)
xslt = models.TextField(default='',
help_text=('XSLT transform to be applied to all html files.'),
blank=True)
namespace = models.CharField(max_length=NAMESPACE_LENGTH, blank=True, null=True)
name = models.CharField(max_length=NAME_LENGTH, blank=True, null=True)
description = models.CharField(max_length=DESCRIPTION_LENGTH,
blank=True,
default='')
downloader = models.CharField(max_length=20, blank=True, null=True)
downloader_settings = jsonfield.JSONField(
default={},
help_text=('JSON with settings for selected downloader.'),
blank=True)
downloaders = jsonfield.JSONField(
default=[],
help_text=('JSON with guessed downloaders and their additional meta information.'),
blank=True)
status = models.CharField(max_length=40, default='created')
processing_status = models.CharField(max_length=PROCESSING_STATUS_LENGTH)
icon = models.CharField(max_length=1000,
blank=True, null=True)
class Meta:
unique_together = ('namespace', 'name')
def __unicode__(self):
return u'Changelog from {0}'.format(self.source)
__repr__ = make_repr('namespace', 'name', 'source')
def latest_versions(self, limit):
return self.versions.exclude(unreleased=True) \
.order_by('-order_idx')[:limit]
def latest_version(self):
versions = list(self.latest_versions(1))
if versions:
return versions[0]
def get_display_name(self):
return u'{0}/{1}'.format(
self.namespace,
self.name)
@staticmethod
def create_uniq_name(namespace, name):
"""Returns a name which is unique in given namespace.
Name is created by incrementing a value."""
if namespace and name:
base_name = name
counter = 0
while Changelog.objects.filter(
namespace=namespace,
name=name).exists():
counter += 1
name = '{0}{1}'.format(base_name, counter)
return name
@staticmethod
def get_all_namespaces(like=None):
queryset = Changelog.objects.all()
if like is not None:
queryset = queryset.filter(
namespace__iexact=like
)
return list(queryset.values_list('namespace', flat=True).distinct())
@staticmethod
def normalize_namespaces():
namespaces_usage = defaultdict(int)
changelogs_with_namespaces = Changelog.objects.exclude(namespace=None)
for namespace in changelogs_with_namespaces.values_list('namespace', flat=True):
namespaces_usage[namespace] += 1
def normalize(namespace):
lowercased = namespace.lower()
# here we process only capitalized namespaces
if namespace == lowercased:
return
            # if the lowercased variant is not used at all, nothing to do
if lowercased not in namespaces_usage:
return
lowercased_count = namespaces_usage[lowercased]
this_count = namespaces_usage[namespace]
if lowercased_count >= this_count:
                # if the numbers of occurrences are equal,
                # prefer the lowercased name
Changelog.objects.filter(
namespace=namespace).update(
namespace=lowercased)
else:
Changelog.objects.filter(
namespace=lowercased).update(
namespace=namespace)
del namespaces_usage[namespace]
del namespaces_usage[lowercased]
all_namespaces = namespaces_usage.keys()
all_namespaces.sort()
for namespace in all_namespaces:
normalize(namespace)
def save(self, *args, **kwargs):
if self.id is None:
            # the object was just created, so this is a good
            # time to fix its namespace
existing_namespaces = Changelog.get_all_namespaces(like=self.namespace)
if existing_namespaces:
self.namespace = existing_namespaces[0]
return super(Changelog, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('project',
namespace=self.namespace,
name=self.name)
def editable_by(self, user, light_user=None):
light_moderators = set(self.light_moderators.values_list('light_user', flat=True))
moderators = set(self.moderators.values_list('id', flat=True))
if user.is_authenticated():
            # any changelog can be edited by a superuser
if user.is_superuser:
return True
if moderators or light_moderators:
return user.id in moderators
else:
if moderators or light_moderators:
return light_user in light_moderators
return True
def is_unsuccessful(self):
return self.name is None or \
self.namespace is None or \
self.downloader is None or \
not self.source
def is_moderator(self, user, light_user=None):
light_moderators = set(self.light_moderators.values_list('light_user', flat=True))
moderators = set(self.moderators.values_list('id', flat=True))
if user.is_authenticated():
return user.id in moderators
else:
return light_user in light_moderators
    def add_to_moderators(self, user, light_user=None):
        """Adds the user to the moderators and returns 'normal' or 'light'
        if it actually added them.
        If the user already was a moderator, returns None."""
if not self.is_moderator(user, light_user):
if user.is_authenticated():
Moderator.objects.create(changelog=self, user=user)
return 'normal'
else:
if light_user is not None:
self.light_moderators.create(light_user=light_user)
return 'light'
    def create_issue(self, type, comment='', related_versions=()):
joined_versions = u', '.join(related_versions)
# for some types, only one issue at a time is allowed
if type == 'lesser-version-count':
if self.issues.filter(type=type, resolved_at=None, related_versions=joined_versions).count() > 0:
return
issue = self.issues.create(type=type,
comment=comment.format(related_versions=joined_versions),
related_versions=joined_versions)
chat.send(u'New issue of type "{issue.type}" with comment: "{issue.comment}" was created for <https://allmychanges.com/issues/?namespace={issue.changelog.namespace}&name={issue.changelog.name}|{issue.changelog.namespace}/{issue.changelog.name}>'.format(
issue=issue))
def resolve_issues(self, type):
self.issues.filter(type=type, resolved_at=None).update(resolved_at=timezone.now())
def create_preview(self, user, light_user, **params):
params.setdefault('downloader', self.downloader)
params.setdefault('downloader_settings', self.downloader_settings)
params.setdefault('downloaders', self.downloaders)
params.setdefault('source', self.source)
params.setdefault('search_list', self.search_list)
params.setdefault('ignore_list', self.ignore_list)
params.setdefault('xslt', self.xslt)
preview = self.previews.create(user=user, light_user=light_user, **params)
# preview_test_task.delay(
# preview.id,
        #     ['Guessing downloaders',
# 'Downloading using git',
# 'Searching versions',
# 'Nothing found',
# 'Downloading from GitHub Review',
# 'Searching versions',
# 'Some results were found'])
return preview
def set_status(self, status, **kwargs):
changed_fields = ['status', 'updated_at']
if status == 'error':
self.problem = kwargs.get('problem')
changed_fields.append('problem')
self.status = status
self.updated_at = timezone.now()
self.save(update_fields=changed_fields)
def set_processing_status(self, status, level=logging.INFO):
self.processing_status = status[:PROCESSING_STATUS_LENGTH]
self.updated_at = timezone.now()
self.save(update_fields=('processing_status',
'updated_at'))
key = 'preview-processing-status:{0}'.format(self.id)
cache.set(key, status, 10 * 60)
def get_processing_status(self):
key = 'preview-processing-status:{0}'.format(self.id)
result = cache.get(key, self.processing_status)
return result
def calc_next_update(self):
"""Returns date and time when next update should be scheduled.
"""
hour = 60 * 60
min_update_interval = hour
max_update_interval = 48 * hour
num_trackers = self.trackers.count()
        # here we divide the max interval by 2 because
        # the last stage will add some randomness to
        # the resulting value
time_to_next_update = (max_update_interval / 2) / math.log(max(math.e,
num_trackers))
time_to_next_update = max(min_update_interval,
time_to_next_update,
2 * self.last_update_took)
# add some randomness
time_to_next_update = random.randint(
int(time_to_next_update * 0.8),
int(time_to_next_update * 2.0))
# limit upper bound
return timezone.now() + datetime.timedelta(0, time_to_next_update)
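    # Worked example (illustrative): max_update_interval / 2 is 24 hours.
    # A changelog with two or fewer trackers gets log(e) == 1, so its base
    # interval stays at 24h and is randomized into the 19.2h..48h range.
    # With 20 trackers, log(20) ~= 3.0 shrinks the base interval to ~8h
    # (but never below one hour or below 2 * last_update_took), randomized
    # into roughly 6.4h..16h.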
def calc_next_update_if_error(self):
# TODO: check and remove
return timezone.now() + datetime.timedelta(0, 1 * 60 * 60)
def schedule_update(self, async=True, full=False):
with log.fields(changelog_name=self.name,
changelog_namespace=self.namespace,
async=async,
full=full):
log.info('Scheduling changelog update')
self.set_status('processing')
self.set_processing_status('Waiting in the queue')
self.problem = None
self.save()
if full:
self.versions.all().delete()
if async:
update_changelog_task.delay(self.id)
else:
update_changelog_task(self.id)
def resume(self):
self.paused_at = None
self.next_update_at = timezone.now()
# we don't need to save here, because this will be done in schedule_update
self.schedule_update()
def clean(self):
super(Changelog, self).clean()
self.source, _, _ = normalize_url(self.source, for_checkout=False)
def update_description_from_source(self, fall_asleep_on_rate_limit=False):
# right now this works only for github urls
if 'github.com' not in self.source:
return
url, username, repo = normalize_url(self.source)
url = 'https://api.github.com/repos/{0}/{1}'.format(username, repo)
        headers = {'Authorization': 'token ' + settings.GITHUB_TOKEN}
response = requests.get(url, headers=headers)
if response.status_code == 200:
data = response.json()
self.description = data.get('description', '')
self.save(update_fields=('description', ))
if fall_asleep_on_rate_limit:
remaining = int(response.headers['x-ratelimit-remaining'])
if remaining == 1:
to_sleep = int(response.headers['x-ratelimit-reset']) - time.time() + 10
print 'OK, now I need to sleep {0} seconds because of GitHub\'s rate limit.'.format(to_sleep)
time.sleep(to_sleep)
def add_synonym(self, synonym):
"""Just a shortcut."""
if self.synonyms.filter(source=synonym).count() == 0:
            # if this synonym is already bound to some other project,
            # then raise an exception
found = list(SourceSynonym.objects.filter(source=synonym))
if found:
with log.fields(changelog_id=self.pk,
another_changelog_id=found[0].changelog_id):
raise SynonymError('Synonym already bound to a changelog')
found = list(Changelog.objects.filter(source=synonym))
if found:
with log.fields(changelog_id=self.pk,
another_changelog_id=found[0].pk):
raise SynonymError('Synonym matches a changelog\'s source')
self.synonyms.create(source=synonym)
def merge_into(self, to_ch):
# move trackers
to_ch_trackers = set(to_ch.trackers.values_list('id', flat=True))
for user in self.trackers.all():
if user.id not in to_ch_trackers:
ChangelogTrack.objects.create(user=user, changelog=to_ch)
action = 'moved-during-merge'
action_description = 'User was moved from {0}/{1} to changelog:{2}'.format(
self.namespace,
self.name,
to_ch.id)
UserHistoryLog.write(user, '', action, action_description)
# move issues
for issue in self.issues.all():
issue.changelog = to_ch
issue.save(update_fields=('changelog',))
# remove itself
Changelog.objects.filter(pk=self.pk).delete()
# add synonym
to_ch.add_synonym(self.source)
def set_tag(self, user, name, version_number):
"""Sets or updates tag with `name` on the version.
If tag was updated, returns 'updated'
otherwise, returns 'created'
"""
assert isinstance(version_number, basestring), \
'Parameter "version_number" should be a string, not "{0}"'.format(
type(version_number))
params = dict(user=user, name=name)
existing_tag = self.tags.filter(
**params)
update = existing_tag.count() > 0
if update:
existing_tag.delete()
version = get_one_or_none(self.versions, number=version_number)
self.tags.create(version=version,
version_number=version_number,
**params)
return 'updated' if update else 'created'
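    # Illustrative behaviour (`user` is a hypothetical User instance):
    #
    #   changelog.set_tag(user, 'stable', '1.2.0')  -> 'created'
    #   changelog.set_tag(user, 'stable', '1.3.0')  -> 'updated'
    #   (the previous 'stable' tag is deleted and recreated on '1.3.0')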
def remove_tag(self, user, name):
"""Removes tag with `name` on the version.
"""
self.tags.filter(user=user, name=name).delete()
class SourceSynonym(models.Model):
changelog = models.ForeignKey(Changelog, related_name='synonyms')
created_at = models.DateTimeField(default=timezone.now)
source = URLField(unique=True)
class ChangelogTrack(models.Model):
user = models.ForeignKey(User)
changelog = models.ForeignKey(Changelog)
created_at = models.DateTimeField(default=timezone.now)
class ChangelogSkip(models.Model):
user = models.ForeignKey(User)
changelog = models.ForeignKey(Changelog)
created_at = models.DateTimeField(default=timezone.now)
class Issue(models.Model):
"""Keeps track any issues, related to a changelog.
"""
changelog = models.ForeignKey(Changelog,
related_name='issues',
blank=True,
null=True)
user = models.ForeignKey(User,
related_name='issues',
blank=True,
null=True)
light_user = models.CharField(max_length=40, blank=True, null=True)
type = models.CharField(max_length=40)
comment = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
resolved_at = models.DateTimeField(blank=True, null=True)
resolved_by = models.ForeignKey(User,
related_name='resolved_issues',
blank=True,
null=True)
related_versions = models.TextField(default='', blank=True,
help_text='Comma-separated list of versions, related to this issue')
email = models.CharField(max_length=100, blank=True, null=True)
page = models.CharField(max_length=100, blank=True, null=True)
importance = models.IntegerField(db_index=True, blank=True, default=0)
__repr__ = make_repr('changelog', 'type', 'comment', 'created_at', 'resolved_at')
def save(self, *args, **kwargs):
if not self.importance:
self.importance = calculate_issue_importance(
num_trackers=self.changelog.trackers.count()
if self.changelog
else 0,
user=self.user,
light_user=self.light_user)
return super(Issue, self).save(*args, **kwargs)
@staticmethod
def merge(user, light_user):
entries = Issue.objects.filter(user=None,
light_user=light_user)
if entries.count() > 0:
with log.fields(username=user.username,
num_entries=entries.count(),
light_user=light_user):
log.info('Merging issues')
entries.update(user=user)
def editable_by(self, user, light_user=None):
return self.changelog.editable_by(user, light_user)
def get_related_versions(self):
response = [version.strip()
for version in self.related_versions.split(',')]
return filter(None, response)
def get_related_deployments(self):
return DeploymentHistory.objects \
.filter(deployed_at__lte=self.created_at) \
.order_by('-id')[:3]
def resolve(self, user, notify=True):
self.resolved_at = timezone.now()
self.resolved_by = user
self.save(update_fields=('resolved_at', 'resolved_by'))
if notify:
chat.send((u'Issue <https://allmychanges.com{url}|#{issue_id}> '
u'for {namespace}/{name} was resolved by {username}.').format(
url=reverse('issue-detail', pk=self.id),
issue_id=self.id,
namespace=self.changelog.namespace,
name=self.changelog.name,
username=user.username))
if self.type == 'auto-paused':
changelog = self.changelog
with log.fields(changelog_id=changelog.id):
log.info('Resuming changelog updates')
changelog.resume()
if notify:
chat.send(u'Autopaused package {namespace}/{name} was resumed {username}.'.format(
namespace=changelog.namespace,
name=changelog.name,
username=user.username))
class IssueComment(models.Model):
issue = models.ForeignKey(Issue, related_name='comments')
user = models.ForeignKey(User, blank=True, null=True,
related_name='issue_comments')
created_at = models.DateTimeField(default=timezone.now)
message = models.TextField()
class DiscoveryHistory(models.Model):
"""Keeps track any issues, related to a changelog.
"""
changelog = models.ForeignKey(Changelog,
related_name='discovery_history')
discovered_versions = models.TextField()
new_versions = models.TextField()
num_discovered_versions = models.IntegerField()
num_new_versions = models.IntegerField()
created_at = models.DateTimeField(auto_now_add=True)
__repr__ = make_repr('discovered_versions')
class LightModerator(models.Model):
    """These entries are created when an anonymous user
    adds another package into the system.
    When the user signs up, these entries should be
    transformed into Moderator entries.
"""
changelog = models.ForeignKey(Changelog,
related_name='light_moderators')
light_user = models.CharField(max_length=40)
created_at = models.DateTimeField(auto_now_add=True)
@staticmethod
def merge(user, light_user):
entries = LightModerator.objects.filter(light_user=light_user)
for entry in entries:
with log.fields(username=user.username,
light_user=light_user):
log.info('Transforming light moderator into the permanent')
Moderator.objects.create(
changelog=entry.changelog,
user=user,
from_light_user=light_user)
entries.delete()
@staticmethod
def remove_stale_moderators():
LightModerator.objects.filter(
created_at__lte=timezone.now() - datetime.timedelta(1)).delete()
class Moderator(models.Model):
changelog = models.ForeignKey(Changelog, related_name='+')
user = models.ForeignKey(User, related_name='+')
created_at = models.DateTimeField(auto_now_add=True)
from_light_user = models.CharField(max_length=40, blank=True, null=True)
class Preview(Downloadable, models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='previews',
blank=True,
null=True)
changelog = models.ForeignKey(Changelog,
related_name='previews')
light_user = models.CharField(max_length=40)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(blank=True, null=True)
source = models.URLField()
ignore_list = models.CharField(max_length=1000,
default='',
                                   help_text=('Comma-separated list of directories'
                                              ' and filenames to ignore when'
                                              ' searching for the changelog.'),
blank=True)
# TODO: remove this field after migration on production
check_list = models.CharField(max_length=1000,
default='',
                                  help_text=('Comma-separated list of directories'
                                             ' and filenames to search for the'
                                             ' changelog.'),
blank=True)
search_list = models.CharField(max_length=1000,
default='',
                                   help_text=('Comma-separated list of directories'
                                              ' and filenames to search for the'
                                              ' changelog.'),
blank=True)
xslt = models.TextField(default='',
help_text=('XSLT transform to be applied to all html files.'),
blank=True)
problem = models.CharField(max_length=1000,
help_text='Latest error message',
blank=True, null=True)
downloader = models.CharField(max_length=255, blank=True, null=True)
downloader_settings = jsonfield.JSONField(
default={},
help_text=('JSON with settings for selected downloader.'),
blank=True)
downloaders = jsonfield.JSONField(
default=[],
help_text=('JSON with guessed downloaders and their additional meta information.'),
blank=True)
done = models.BooleanField(default=False)
status = models.CharField(max_length=40, default='created')
processing_status = models.CharField(max_length=40)
log = jsonfield.JSONField(default=[],
help_text=('JSON with log of all operation applied during preview processing.'),
blank=True)
@property
def namespace(self):
return self.changelog.namespace
@property
def name(self):
return self.changelog.name
@property
def description(self):
return self.changelog.description
def set_status(self, status, **kwargs):
changed_fields = ['status', 'updated_at']
if status == 'processing':
self.versions.all().delete()
self.updated_at = timezone.now()
changed_fields.append('updated_at')
elif status == 'error':
self.problem = kwargs.get('problem')
changed_fields.append('problem')
self.status = status
self.updated_at = timezone.now()
self.save(update_fields=changed_fields)
def set_processing_status(self, status, level=logging.INFO):
self.log.append(status)
self.processing_status = status[:PROCESSING_STATUS_LENGTH]
self.updated_at = timezone.now()
self.save(update_fields=('processing_status',
'updated_at',
'log'))
key = 'preview-processing-status:{0}'.format(self.id)
cache.set(key, status, 10 * 60)
def get_processing_status(self):
key = 'preview-processing-status:{0}'.format(self.id)
result = cache.get(key, self.processing_status)
return result
def schedule_update(self):
self.set_status('processing')
self.set_processing_status('Waiting in the queue')
self.versions.all().delete()
update_preview_task.delay(self.pk)
class VersionManager(models.Manager):
use_for_related_fields = True
def create(self, *args, **kwargs):
version = super(VersionManager, self).create(*args, **kwargs)
changelog = kwargs.get('changelog')
if changelog:
version.associate_with_free_tags()
return version
def released(self):
return self.exclude(unreleased=True)
def unreleased(self):
return self.filter(unreleased=True)
class Version(models.Model):
changelog = models.ForeignKey(Changelog,
related_name='versions',
blank=True,
null=True,
on_delete=models.SET_NULL)
preview = models.ForeignKey(Preview,
related_name='versions',
blank=True,
null=True,
on_delete=models.SET_NULL)
date = models.DateField(blank=True, null=True)
number = models.CharField(max_length=255)
unreleased = models.BooleanField(default=False)
discovered_at = models.DateTimeField(blank=True, null=True)
last_seen_at = models.DateTimeField(blank=True, null=True)
filename = models.CharField(max_length=1000,
help_text=('Source file where this version was found'),
blank=True, null=True)
raw_text = models.TextField(blank=True, null=True)
processed_text = models.TextField(blank=True, null=True)
order_idx = models.IntegerField(blank=True, null=True,
                                    help_text=('This field is used to reorder versions '
                                               'according to their version numbers and to '
                                               'fetch them from the database efficiently.'))
tweet_id = models.CharField(max_length=1000,
                                help_text=('Tweet id, or None if we have not tweeted about this version yet.'),
blank=True,
null=True)
objects = VersionManager()
class Meta:
get_latest_by = 'order_idx'
ordering = ['-order_idx']
def __unicode__(self):
return self.number
def get_absolute_url(self):
return self.changelog.get_absolute_url() + '#' + self.number
def post_tweet(self):
if not settings.TWITTER_CREDS:
return
if self.unreleased:
raise RuntimeError('Unable to tweet about unreleased version')
if self.tweet_id:
return # because we already posted a tweet
ch = self.changelog
image_url = settings.BASE_URL + ch.get_absolute_url() \
+ '?snap=1&version=' + self.number
filename = sha1(image_url).hexdigest() + '.png'
full_path = os.path.join(settings.SNAPSHOTS_ROOT, filename)
result = envoy.run(
'{root}/makescreenshot --width 590 --height 600 {url} {path}'.format(
root=settings.PROJECT_ROOT,
url=image_url,
path=full_path))
if result.status_code != 0:
with log.fields(
status_code=result.status_code,
std_out=result.std_out,
std_err=result.std_err):
log.error('Unable to make a screenshot')
raise RuntimeError('Unable to make a screenshot')
with open(full_path, 'rb') as f:
from requests_oauthlib import OAuth1
auth = OAuth1(*settings.TWITTER_CREDS)
response = requests.post(
'https://upload.twitter.com/1.1/media/upload.json',
auth=auth,
files={'media': ('screenshot.png',
f.read(), 'image/png')})
media_id = response.json()['media_id_string']
url = settings.BASE_URL + self.get_absolute_url()
text = '{number} of {namespace}/{name} was released: {url} #{namespace} #{name} #release'.format(
number=self.number,
namespace=ch.namespace,
name=ch.name,
url=url)
response = requests.post(
'https://api.twitter.com/1.1/statuses/update.json',
auth=auth,
data={'status': text,
'media_ids': media_id})
if response.status_code == 200:
self.tweet_id = response.json()['id_str']
self.save(update_fields=('tweet_id',))
return full_path
def set_tag(self, user, name):
"""Convenience method to set tag on just this version.
"""
self.changelog.set_tag(user, name, self.number)
def associate_with_free_tags(self):
# associate free tags with this version
for tag in self.changelog.tags.filter(version_number=self.number):
tag.version = self
tag.save(update_fields=('version',))
class Tag(models.Model):
# this field shouldn't be blank or null
# but I have to make it so, because otherwise
    # DB migrations weren't possible
changelog = models.ForeignKey(Changelog,
blank=True,
null=True,
related_name='tags')
# tag may be tied to a version in the database,
    # but in some cases we may not have a parsed version
    # with the given number
version = models.ForeignKey(Version,
blank=True,
null=True,
related_name='tags')
user = models.ForeignKey(User, related_name='tags')
# regex=ur'[a-z][a-z0-9-]*[a-z0-9]'
name = models.CharField(max_length=40)
    # we don't have any restrictions on the format of this field;
    # it could be any string, even something like 'latest'
version_number = models.CharField(max_length=40)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ('changelog', 'user', 'name')
def get_absolute_url(self):
        # the name shouldn't contain any unicode or non-ascii letters or spaces;
        # otherwise, we would need to encode to utf-8 and quote_plus it.
return self.changelog.get_absolute_url() + '#' + self.name
__repr__ = make_repr('name', 'version_number')
class FeedItem(models.Model):
user = models.ForeignKey(User)
version = models.ForeignKey(Version, related_name='feed_items')
created_at = models.DateTimeField(auto_now_add=True)
ACTIVE_USER_ACTIONS = (
u'landing-digest-view', u'landing-track', u'landing-ignore',
u'login', u'profile-update', u'digest-view',
u'package-view', u'package-create', u'package-edit',
u'edit-digest-view', u'index-view', u'track', u'untrack',
u'untrack-allmychanges', u'create-issue',
u'email-digest-open', u'email-digest-click')
class UserHistoryLog(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='history_log',
blank=True,
null=True)
light_user = models.CharField(max_length=40)
action = models.CharField(max_length=40)
description = models.CharField(max_length=1000)
created_at = models.DateTimeField(auto_now_add=True)
@staticmethod
def merge(user, light_user):
entries = UserHistoryLog.objects.filter(user=None,
light_user=light_user)
if entries.count() > 0:
with log.fields(username=user.username,
num_entries=entries.count(),
light_user=light_user):
log.info('Merging user history logs')
entries.update(user=user)
@staticmethod
def write(user, light_user, action, description):
user = user if user is not None and user.is_authenticated() else None
return UserHistoryLog.objects.create(user=user,
light_user=light_user,
action=action,
description=description)
class UserStateHistory(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='state_history')
date = models.DateField()
state = models.CharField(max_length=40)
class DeploymentHistory(models.Model):
hash = models.CharField(max_length=32, default='')
description = models.TextField()
deployed_at = models.DateTimeField(auto_now_add=True)
__repr__ = make_repr('deployed_at', 'hash')
class EmailVerificationCode(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL,
related_name='email_verification_code')
hash = models.CharField(max_length=32, default='')
deployed_at = models.DateTimeField(auto_now_add=True)
@staticmethod
def new_code_for(user):
hash = md5(str(time.time()) + settings.SECRET_KEY).hexdigest()
try:
code = user.email_verification_code
code.hash = hash
code.save()
except EmailVerificationCode.DoesNotExist:
code = EmailVerificationCode.objects.create(
user=user,
hash=hash)
return code
AUTOCOMPLETE_TYPES = (
('source', 'Source URL'),
('namespace', 'Namespace'),
('package', 'Package'))
AUTOCOMPLETE_ORIGINS = (
('app-store', 'App Store'),
('pypi', 'PyPi'))
COMMON_WORDS = set('a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your'.split(','))
class AutocompleteData(models.Model):
origin = models.CharField(max_length=100,
choices=AUTOCOMPLETE_ORIGINS)
title = models.CharField(max_length=255)
description = models.CharField(max_length=DESCRIPTION_LENGTH,
default='')
type = models.CharField(max_length=10,
choices=AUTOCOMPLETE_TYPES)
    source = models.CharField(max_length=255,  # we need this because MySQL outputs a warning and breaks our migrations for greater lengths
blank=True, null=True,
db_index=True)
icon = models.CharField(max_length=255,
blank=True, null=True)
changelog = models.ForeignKey(Changelog,
blank=True, null=True,
related_name='autocomplete')
score = models.IntegerField(default=0,
help_text=('A value from 0 to infinity. '
'Items with bigger values '
'should appear at the top '
                                           'of the suggestions.'))
__repr__ = make_repr('title')
def save(self, *args, **kwargs):
super(AutocompleteData, self).save(*args, **kwargs)
if self.words.count() == 0:
self.add_words()
def add_words(self, db_name='default'):
if db_name == 'default':
data = self
else:
data = AutocompleteData.objects.using(db_name).get(pk=self.pk)
words = data.title.split()
words = (word.strip() for word in words)
words = set(word.lower() for word in words if len(word) > 3)
words -= COMMON_WORDS
words.add(data.title.lower())
words = [AutocompleteWord2.objects.using(db_name).get_or_create(word=word)[0]
for word in words]
data.words2.add(*words)
class AutocompleteWord(models.Model):
word = models.CharField(max_length=100, db_index=True)
data = models.ForeignKey(AutocompleteData,
related_name='words')
__repr__ = make_repr('word')
class AutocompleteWord2(models.Model):
word = models.CharField(max_length=100, unique=True)
data_objects = models.ManyToManyField(
AutocompleteData,
related_name='words2')
__repr__ = make_repr('word')
class AppStoreBatch(models.Model):
"""To identify separate processing batches.
"""
created = models.DateTimeField(auto_now_add=True)
__repr__ = make_repr()
class AppStoreUrl(models.Model):
"""This model is used when we are fetching
    data from the App Store for our autocomplete.
Use management command update_appstore_urls to populate this collection.
"""
    # we need this because MySQL outputs a warning and breaks our migrations for greater lengths
source = models.CharField(max_length=255,
blank=True, null=True,
unique=True)
autocomplete_data = models.OneToOneField(AutocompleteData,
blank=True, null=True,
related_name='appstore_url',
on_delete=models.SET_NULL)
batch = models.ForeignKey(AppStoreBatch,
blank=True, null=True,
related_name='urls',
on_delete=models.SET_NULL)
rating = models.FloatField(blank=True, null=True)
rating_count = models.IntegerField(blank=True, null=True)
__repr__ = make_repr('source')
class MandrillMessage(models.Model):
mid = models.CharField(max_length=32,
                           help_text="Mandrill's ID",
db_index=True)
timestamp = models.IntegerField()
email = models.EmailField()
user = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='mandrill_messages',
on_delete=models.SET_NULL,
blank=True,
null=True)
payload = models.TextField()
__repr__ = make_repr('mid', 'email')
| AllMyChanges/allmychanges.com | allmychanges/models.py | Python | bsd-2-clause | 55,463 |
# Copyright (c) 2012-2021, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Shield"
prefix = "shield"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
AssociateDRTLogBucket = Action("AssociateDRTLogBucket")
AssociateDRTRole = Action("AssociateDRTRole")
AssociateHealthCheck = Action("AssociateHealthCheck")
AssociateProactiveEngagementDetails = Action("AssociateProactiveEngagementDetails")
CreateProtection = Action("CreateProtection")
CreateProtectionGroup = Action("CreateProtectionGroup")
CreateSubscription = Action("CreateSubscription")
DeleteProtection = Action("DeleteProtection")
DeleteProtectionGroup = Action("DeleteProtectionGroup")
DeleteSubscription = Action("DeleteSubscription")
DescribeAttack = Action("DescribeAttack")
DescribeAttackStatistics = Action("DescribeAttackStatistics")
DescribeDRTAccess = Action("DescribeDRTAccess")
DescribeEmergencyContactSettings = Action("DescribeEmergencyContactSettings")
DescribeProtection = Action("DescribeProtection")
DescribeProtectionGroup = Action("DescribeProtectionGroup")
DescribeSubscription = Action("DescribeSubscription")
DisableApplicationLayerAutomaticResponse = Action(
"DisableApplicationLayerAutomaticResponse"
)
DisableProactiveEngagement = Action("DisableProactiveEngagement")
DisassociateDRTLogBucket = Action("DisassociateDRTLogBucket")
DisassociateDRTRole = Action("DisassociateDRTRole")
DisassociateHealthCheck = Action("DisassociateHealthCheck")
EnableApplicationLayerAutomaticResponse = Action(
"EnableApplicationLayerAutomaticResponse"
)
EnableProactiveEngagement = Action("EnableProactiveEngagement")
GetSubscriptionState = Action("GetSubscriptionState")
ListAttacks = Action("ListAttacks")
ListProtectionGroups = Action("ListProtectionGroups")
ListProtections = Action("ListProtections")
ListResourcesInProtectionGroup = Action("ListResourcesInProtectionGroup")
ListTagsForResource = Action("ListTagsForResource")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateApplicationLayerAutomaticResponse = Action(
"UpdateApplicationLayerAutomaticResponse"
)
UpdateEmergencyContactSettings = Action("UpdateEmergencyContactSettings")
UpdateProtectionGroup = Action("UpdateProtectionGroup")
UpdateSubscription = Action("UpdateSubscription")
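# Example usage (an illustrative sketch; it assumes the policy helpers
# exported by awacs.aws, and the account id is made up):
#
#   from awacs.aws import Allow, PolicyDocument, Statement
#   from awacs import shield
#
#   policy = PolicyDocument(
#       Version="2012-10-17",
#       Statement=[
#           Statement(
#               Effect=Allow,
#               Action=[shield.ListProtections, shield.DescribeProtection],
#               Resource=[shield.ARN(resource="protection/*",
#                                    account="123456789012")],
#           )
#       ],
#   )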
| cloudtools/awacs | awacs/shield.py | Python | bsd-2-clause | 2,682 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20150326_1433'),
]
operations = [
migrations.RemoveField(
model_name='problem',
name='id',
),
migrations.AlterField(
model_name='problem',
name='problemId',
field=models.IntegerField(serialize=False, primary_key=True),
preserve_default=True,
),
]
| shanzi/tchelper | api/migrations/0003_auto_20150326_1435.py | Python | bsd-2-clause | 560 |
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from app import app
import gevent
from gevent.pywsgi import WSGIServer
from gevent.pool import Pool
from gevent import monkey
import signal
monkey.patch_all()
server = WSGIServer(('', 5000), app, spawn=Pool(None))
def stop():
server.stop()
gevent.signal(signal.SIGINT, stop)
if __name__ == "__main__":
server.serve_forever()
| Jumpscale/go-raml | codegen/fixtures/congo/python_server/server.py | Python | bsd-2-clause | 425 |
"""
"""
# Created on 2016.08.09
#
# Author: Giovanni Cannata
#
# Copyright 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from ... import SEQUENCE_TYPES, STRING_TYPES
from .formatters import format_time
from ...utils.conv import to_raw
# Validators return True if value is valid, False if value is not valid,
# or a value different from True and False that is a valid value to substitute to the input value
def check_type(input_value, value_type):
if isinstance(input_value, value_type):
return True
if isinstance(input_value, SEQUENCE_TYPES):
for value in input_value:
if not isinstance(value, value_type):
return False
return True
return False
def always_valid(input_value):
return True
def validate_generic_single_value(input_value):
if not isinstance(input_value, SEQUENCE_TYPES):
return True
    try:  # the object might not have a __len__ method
if len(input_value) == 1:
return True
except Exception:
pass
return False
def validate_integer(input_value):
if check_type(input_value, (float, bool)):
return False
if str is bytes: # Python 2, check for long too
if check_type(input_value, (int, long)):
return True
else: # Python 3, int only
if check_type(input_value, int):
return True
    if not isinstance(input_value, SEQUENCE_TYPES):
        sequence = False
        input_value = [input_value]
    else:
        sequence = True  # indicates if a sequence must be returned
valid_values = [] # builds a list of valid int values
for element in input_value:
        try:  # try to convert any type to int; an invalid conversion raises TypeError or ValueError, and if both are valid and equal then the int() value is used
float_value = float(element)
int_value = int(element)
if float_value == int_value:
valid_values.append(int(element))
else:
return False
except (ValueError, TypeError):
return False
if sequence:
return valid_values
else:
return valid_values[0]
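# Illustrative behaviour (worked examples, not executed):
#
#   validate_integer(42)          -> True    (already a valid int)
#   validate_integer('42')        -> 42      (valid substitute value)
#   validate_integer(['1', '2'])  -> [1, 2]  (sequence in, sequence out)
#   validate_integer('4.5')       -> False   (not a whole number, rejected)
#   validate_integer(True)        -> False   (bools are not integers here)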
def validate_bytes(input_value):
return check_type(input_value, bytes)
def validate_boolean(input_value):
    # it could be a real bool or the string TRUE or FALSE; only a single value is allowed
if validate_generic_single_value(input_value): # valid only if a single value or a sequence with a single element
if isinstance(input_value, SEQUENCE_TYPES):
input_value = input_value[0]
if isinstance(input_value, bool):
if input_value:
return 'TRUE'
else:
return 'FALSE'
if isinstance(input_value, STRING_TYPES):
if input_value.lower() == 'true':
return 'TRUE'
elif input_value.lower() == 'false':
return 'FALSE'
return False
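# Illustrative behaviour (worked examples, not executed):
#
#   validate_boolean(True)       -> 'TRUE'   (substitute LDAP literal)
#   validate_boolean('false')    -> 'FALSE'  (strings are case-insensitive)
#   validate_boolean(['TRUE'])   -> 'TRUE'   (single-element sequence is ok)
#   validate_boolean([True, 1])  -> False    (multi-valued input is rejected)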
def validate_time(input_value):
# if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC
if not isinstance(input_value, SEQUENCE_TYPES):
sequence = False
input_value = [input_value]
else:
sequence = True # indicates if a sequence must be returned
valid_values = []
changed = False
for element in input_value:
        if isinstance(element, STRING_TYPES):  # checks whether it is already a Generalized Time
if isinstance(format_time(to_raw(element)), datetime): # valid Generalized Time string
valid_values.append(element)
else:
return False
elif isinstance(element, datetime):
changed = True
if element.tzinfo: # a datetime with a timezone
valid_values.append(element.strftime('%Y%m%d%H%M%S%z'))
else: # datetime without timezone, assumed local and adjusted to UTC
offset = datetime.now() - datetime.utcnow()
valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ'))
else:
return False
if changed:
if sequence:
return valid_values
else:
return valid_values[0]
else:
return True
| Varbin/EEH | _vendor/ldap3/protocol/formatters/validators.py | Python | bsd-2-clause | 5,077 |
#!/usr/bin/env python
"""
LeBLEU - Letter-edit / Levenshtein BLEU
"""
import logging
#__all__ = []
__version__ = '0.0.1'
__author__ = 'Stig-Arne Gronroos'
__author_email__ = "[email protected]"
_logger = logging.getLogger(__name__)
def get_version():
return __version__
# The public api imports need to be at the end of the file,
# so that the package global names are available to the modules
# when they are imported.
from .lebleu import LeBLEU
# Convenience functions
def eval_single(*args, **kwargs):
lb = LeBLEU(**kwargs)
return lb.eval_single(*args)
def eval(*args, **kwargs):
lb = LeBLEU(**kwargs)
return lb.eval(*args)
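# Minimal usage sketch (illustrative; the positional argument order shown
# here, hypothesis first and reference second, is an assumption, since the
# wrappers above simply forward *args to LeBLEU):
#
#   import lebleu
#   score = lebleu.eval_single(u'a hypothesis sentence',
#                              u'a reference sentence')
#   scores = lebleu.eval([u'hyp one', u'hyp two'],
#                        [u'ref one', u'ref two'])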
| Waino/LeBLEU | lebleu/__init__.py | Python | bsd-2-clause | 670 |
import cStringIO
import zlib
import wx
#----------------------------------------------------------------------
def getMailData():
return zlib.decompress(
"x\xda\x01M\x01\xb2\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x01\x04IDATX\x85\xed\x941\x0e\x82@\x10E\x9f\xc6`,\x88\xad\
\x8d\x8d\x89r\x02B\xc1\t\xbc\x94\x857\xf04\x9e\xc0C\x00\x95\xb1\xb1\xa52\xda\
h\xc1N\xe1\xc8f5j\x9cD^Ev\x98\x81\xffv\x01::\xfe\x9d^\x91e\xd7\xb6\xc2d\xb9\
\x04`\xb8X\xbc\xf5\x80sY\x02p\xdcn[\xeb\xfd\xb7\xa6\x7f\x80\x81\xaf o<O\xd3f\
\xc1\x19y\x1a\xd7\xbf\xf7$\x17\xec\x19\x90\xbd?\x15\x05\x00\xd5z\r\xc0\\n\
\x08\x99p\x89\xa5o<\x9b\x010J\x12\xe0\xf1,\xd83\x10\xafV\xcd\x85K \x04M\x04\
\x92\xcb\\\xfb\x06\x84\xa7M\xa8u_r\x1fv\r\x08\xb1\xfc\x07\x14\x952\xf3\x90\
\xdc\xd3\xa71l\xe0p\x00\xe0R\xd7@8\x91N.}\x91\x9b\xc3t\xda\xdag\xd0\x80$\xdf\
\xed\x00\x88\xf2\xbcYw\tb\xf9\xfe\xd5\x19\xd0\xa7=\xf2\xcdQ\xd83\xe0K\xae\t}\
\xdf\xd2'sd\xae\xc6\x9e\x81P\xf2\x97Q&\xd8l\xee\xca\xf6\x0c\xf8\xf6\xea[\xfc\
\xdc@G\xc7\rv\x18V\xd3#+\xef\x8c\x00\x00\x00\x00IEND\xaeB`\x82\xb38\x8e\xb0"\
)
def getMailBitmap():
return wx.BitmapFromImage(getMailImage())
def getMailImage():
stream = cStringIO.StringIO(getMailData())
return wx.ImageFromStream(stream)
def getMailIcon():
icon = wx.EmptyIcon()
icon.CopyFromBitmap(getMailBitmap())
return icon
#----------------------------------------------------------------------
def getNoMailData():
return zlib.decompress(
'x\xda\x01G\x04\xb8\xfb\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x03\xfeIDATX\x85\xed\x97[o\xdb8\x10F\x0fu\xa1$\xeb\x96(A\
\x92\x1a}\xe8\xcf\xdc\xdd?\xeb\xa0h\x12\'\xa9#;\xba\x8b\x12\xb5\x0f\x81\x88\
\xba\xb6w\xb37\xf4a;\x80!\x98\xb09gf8\xdfPBX6?\xd2\xac\x1f\xea\xfd\'\xc0O\
\x00\xc0\xf9\xed\xd7_\xa6i\x9a\xf6\x16\xb3,\xe3\xea\xea\x8a8\x8eY,\x16X\xd6\
\xdf\xe3\x1c\xc7\x91\xba\xae\xa9\xaa\x8a\xa7\xa7\'6\x9b\xcd!@\x92$\x07\x8b\
\xbe\xef\x9b\xe7\xe5\xe5%a\x18"\xa5\xc4\xb6\xdf\xd7\xb2\xe38\xd2\xf7=UU\xf1\
\xf8\xf8HUUx\x9eG\x9a\xa6\x87\x00\xc76\xa8\xeb\x9a\xae\xeb\xf0}\x9f\xeb\xebk\
\xc20$MS\\\xd7}\x17\x80R\x8a\xddnG]\xd7\x94e\xc9\xd3\xd3\x13\xe38\x1e\xfd\
\xed\x1e\x80\x94\x12\xdf\xf7\xd1Z3\x0c\x03M\xd3\xb0^\xaf\x11B\xe0\xba.q\x1c#\
\xa5\xc4q\x8er3\x0c\x03}\xdfS\xd75_\xbf~e\xbd^\xd34\r\x8e\xe3\xe0\xfb>\xb6m\
\xd3\xb6-]\xd7\x1d\x07\x08\xc3\x90\x8b\x8b\x0b\x94R4MC\xd7u\xacV+\xba\xae\
\xc3q\x1c\x84\x10\xa4iz\x12`\x1cG\xca\xb2\xe4\xf9\xf9\x99\xdb\xdb[\xee\xef\
\xef\rx\x10\x04x\x9e\xc7f\xb39\r\x90$\t\x1f?~\xa4\xaek6\x9b\rEQ\xd0u\x1d\xbb\
\xdd\x8e\xbb\xbb;\xc6qd\x9a\xa6\x83L\xcc\x91\x17E\xc1z\xbdf\xbd^\xb3\xdb\xed\
\xd0Z\x1b\x80,\xcb\x88\xa2\x08\xa5\x14///\xc7\x01\xd24\xe5\xd3\xa7O\xbc\xbc\
\xbc\xd0\xf7=sw\xf4}\xcf\xed\xed-M\xd3`Y\x16B\x08\x92$\xd9\x03\x98k\xbdZ\xad\
x||\xc4\xb2,\xa2("\x0cC\x92$\xe1\xc3\x87\x0fdY\xb6\xe7\xfc\x00\xc0\xf3<\xe28\
6N]\xd7\xc5\xb2,^__)\xcb\x92\xedv\xcb\xfd\xfd=Zk\xa6ib\x18\x06\x00\xaa\xaa2\
\x91o\xb7[\xfa\xbe\'\x8a"\x13\xf9\xe5\xe5%Y\x96\x99\xcc\x9d\x04\xf8\xb6\x14R\
J\xa4\x94\x0c\xc3\x80\xd6\xdaD\xfa\xf9\xf3g\x9a\xa6A\x08\xc1\xf9\xf99\x00y\
\x9e\xb3Z\xadx~~F\x08A\x14EDQD\x9a\xa6,\x97Knnn\xf0<\x8f\xef\xf5\xe6$\x80\
\xef\xfb\xf8\xbeO\xd34\xa6\x96\x00eYR\x96%y\x9e\xf3\xf0\xf0@Q\x14f=\xcfs\xba\
\xae\xdbK{\x92$\xa4ij\xfa\xbfi\x9a\xf7\x01\xcc&\xa5$I\x12\x93\xf2\xd9\x94R|\
\xf9\xf2\x05!\x04\x00\xd34\xa1\xb5&\x0cC\xe3<MS\xe28\xfeS\xed8\n0\x9f\xf6\
\xb9\xff\x83 `\x1cG\xe3\xb0(\n\xaa\xaa\xa2\xef{\x03\x1a\x86!q\x1c\x13\xc71Q\
\x14\xe1\xfb>\xae\xeb"\x84`\x18\x06\xf3\xdfw\x01h\xad\xe9\xfb\x9e\xae\xebPJa\
Y\x16q\x1cc\xdb\xb6\xc9\x84\x10\xe2(@\x9a\xa6\x04A\x80\x10\x02\xa5\x14]\xd7\
\xd1u\xdd\xc9L\xec\x01h\xad\x19\xc7\x11\xad5u]\x1b\xe7s4\xf3SJ\x89eY\xb4m\
\x0b\xbcu\xcf\xd9\xd9\x19gggDQ\x84\x94\x12\xa5\x14\xd34\xa1\x94\xa2\xaek\x82\
0>N\x02\xccCd\x18\x06^__\xb1m\x9b0\x0c\xf1<\x0f\xd7u\x99\xa6\x89\xf3\xf3s\
\xf2<\x07\xde\x0e\x1f@\x14E,\x97K...L\xa4s\xf4\xf3\\\x98\xa6\t\xc7q\x0ef\xc2\
\x1e\xc0L\xab\xb5F)\x85\xeb\xba,\x16\x0b\x82 \xc0u]#<\x8e\xe3\xd0\xb6-\x9e\
\xe7\x01\x10\xc71WWWdY\x06\xbc\xb5\xabR\n\xdb\xb6)\x8a\x82\xb6mi\xdb\x16\xcb\
\xb2PJ\x9d\x06\x98ew\xb1X\x18\xfd\x0e\x82\xc0\xcc\x81\xd9\x82 `\xb9\\\x9a\
\xcd\xa4\x94&\xc5\xf0v>\x1c\xc7!\x08\x02\xa6i\xc2\xb6m\x94RF\xdaO\x02\xcc\
\x9a>\x0b\x89\xe7yx\x9ewp!\x99\xc1N\x99m\xdb\xe63\x7f\xdf\xedv\xf4}\xff\xc7%\
\xf0}\x9f4MM\xddOM\xbd\xbfb\xf3\x1eQ\x141\x8e\xa3)\xdbQ\x80yn\xcf\xa7\xfc[\
\xbd\xff\'fY\x96\xb9k|\x1f\xd4\xd130\xcf\xff\x7f\xd3\xc6q4w\x8c=\x80\xa6i\
\x8c\xb8\xe4yn.\x11\xff\x85)\xa5\xd8n\xb7\xd4um\xd6\xc4\xcfw\xc3\xff=\xc0\
\xefa\x89?u1\xd3\xf5 \x00\x00\x00\x00IEND\xaeB`\x82\xc4\x1f\x08\x9f' )
def getNoMailBitmap():
return wx.BitmapFromImage(getNoMailImage())
def getNoMailImage():
stream = cStringIO.StringIO(getNoMailData())
return wx.ImageFromStream(stream)
def getNoMailIcon():
icon = wx.EmptyIcon()
icon.CopyFromBitmap(getNoMailBitmap())
return icon
#----------------------------------------------------------------------
def getErrMailData():
return zlib.decompress(
'x\xda\x01W\x05\xa8\xfa\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x05\x0eIDATX\x85\xcd\x97\xcf\x8f\xdb\xd4\x16\xc7?v\xae\x7f\
\xc5N&\x8e\xd3L\x92\xceL%T\x15\rbQQ!\xe8\x0e\xc4\x92\xff\x80%H\xac\xdeC\xf0\
\xfe\x94\x07\xdb\xf7\x96\xac\xfa\x1f TT\t\x06\x90\xa0,*UB#\x90f:i"\'\x99L\
\xec\xd8\xf1\xaf\x98\xc5LLC\x92\x8aH\xa0r$/|t\xef9\x1f\xdf\xfb\xbd\xe7\\K\
\x92\\\xe2E\x9a\xfcB\xb3\x03b\xdb\t\x9f}\xfa\xdf\xfc\xf5\xd1\x88\x83\xcf?\
\xa7\xf2\xf81\x00\xde\xe1!\xa7\xef\xbd\xc7\xf7\xf5:\xff\xfa\xf7G\xd2\xdf\n\
\xb0w\xff>\xd7\x83\x80\xeah\x84q\xe5\x93F#:GG\xec\x95\xcb\xdb\x86C\xdaV\x03\
\xdfjj\xfeZ\x9e#\xc71\xf2|\x0e\xc0\\\x96\x99\xab*?J\x12oF\xf1V+\xb0\xb5\x06\
\x1cUE\xccfEr\x00y>G\xccf8\xaa\xbam8\xc4\x7f>\xf98\xcf\xf3|\xc9\xd9n\xb7\xd9\
\xdb\xdbCQ\x94%\xff\xf5\xef\xbe\xa3~\xef\x1e\\\\\xac\rV\xaf\xd7\xf9\xe6\xc3\
\x0f\xf3\xb37\xdeX\xf2\'I\xc2\x93\'Ox\xfa\xf4\xe9*@\xa5RYu\nA\x92$\xe8\xba\
\x8eeY\xc5cw\xbb\xe8\xba\xbe\xf1kt]g\x7f\x7f\x1f\xeb\xe5\x97\xf1}\xbfx\x82 @\
\x08A\xb5Z]\xcd\xb5.\x90\xe7y\x84a\xc8\xee\xee.\x86a`\x9a&\xedv\x1b\xab\xd1@\
<g\x99UU\xa5\xd1h\xa0\xb7\xdbt\xbb]...\x18\x8dF\xf4\xfb}\xd24];g\t`\x91L\x92\
.u\x94\xe79\xc3\xe1\x10UU)\x97\xcb\x94\xc2\x90r\x96\xb1I\xb6Y\x96\x11\x86!\
\xe3\xf1\x98\xc1`\xc0p8$\xcfsvvv\x8ax\xd3\xe9\x940\x0c\xd7\x03T\xabU:\x9d\
\x0e\xa5\xd2e\x8a\xf3\xf3s\xfa\xfd>I\x92\x000w]\xdaq\xcc\xa65\x88\xe3\x18\
\xd7uyrr\xc2\xc9\xc9\t\xa3\xd1\x88k\xd7\xae\xd1j\xb5\n\xc0n\xb7\xfb|\x80\xfd\
\xfd}\xd24%\x08\x02\xe28&\x08\x02\x92$\xa1\xd7\xeb\xa1\xb9.N\x1coH\xff;@\xaf\
\xd7#I\x12L\xd3\xc44M,\xcb\xa2\\.#\x84\xc0\xf7}\xfa\xfd\xfef\x80\xbd\xbd=&\
\x93\tQ\x14aY\x16\xaa\xaa2\x1e\x8fq]\x97\xb2\xeb\xf2\xd2\x9f\x00p]\x17\xc7q\
\xa8\xd5j\xa8\xaaJ\xa9T\xa2^\xafS\xadV9;;[\x9a\xb3\x04\xa0\xaa*\x96e!I\x12Q\
\x14\x15\xfb\x15\xc71\xbe\xef#\x84(\xf4\xb1\xce$IB\x08\x81\xa6i\x94\xcbe*\
\x95J\xa1\xabj\xb5Z|\xd0F\x80\x85U*\x15TUe0\x18\xd0\xeb\xf50M\x93N\xa7C\xb3\
\xd9D\xd3\xb4\x8d\x00\x9a\xa6\xd1l6\x99w:h\x9a\x86\x10\x02\xc7qh4\x1a\xa8\
\xaa\xca\x1f\xeb\xcdF\x00M\xd3\xd04\x8d\xe9t\x8a,\xcb\xc5\xbbh\xb7\x99\xbe\
\xf2\n%IB\xef\xf5P\xa6S\x00\x12\xd3d\xd6j1=<D\xb4\xdb\xc5y\x97e\x19\xc30\x8a\
\xf7g\xc5\xf7\\\x80M\x16\x1c\x1c\xd0{\xf7]f\xad\x16\xbb_|Q\x00D\x8d\x06\xee\
\xdbos~\xe7\x0e\xb3+\xc5\xffY\xdb\n \xb5m|\xdbF\xb9\xb8 ;:*\xfc\x99e1\xbdy\
\x13\xff\xf0p\xab\xe4\xf0O\xbd\x90DQD\x1c\xc7dY\x86a\x18\x08\xb1<Lq\x1c\xa2\
\x1b7\x98\\\x1d\xc9\xe8\xc6\r\x84\xe3`\x9a\xe6\xf28E!\xcb2<\xcf[Q\xffs\x01|\
\xdf\xc7u]\x84\x104\x9b\xcd\xa22.,\x06\xce\xb3\x8c\xe4\xaa\xa0(\xbb\xbbX\xb7\
o\xe3\x1c\x1c,\x8d\xcb\xb2\x8c\xe9t\x8a\xef\xfb4\x1a\x8d\x15\xc0\x15\x80$I\
\x08\x82\xa0xj\xb5\x1a\xb6m\xaft\xc0sE\xe1\xc20\x08\xaeDh\x9a&V\xa7\x83m\xdb\
K\xe3f\xb3\x19a\x18\x16\xf1$I*\xca\xfaZ\x80\xc9d\xc2\xe9\xe9)\x95J\x85V\xab\
\x85i\x9a+\xcb\x0f\x97M\xab\xd5j\x15\xc1\x14E\xc10\x8c\x95q\x8b:\xa0\xeb:\
\xb3\xd9\x8c\xd3\xd3S&\x93\xc9f\x80(\x8a\xf0<\x8fj\xb5\x8a\xe38+E\'MS\xd24E\
\nCjA\x80\xbchR\x8aB*\xcb\xcc\xae\x92.\xa0\x85\x10\xec\xec\xec\xa0\xeb:\xddn\
\x17\xcf\xf3\x88\xa2h3\xc0\xa2\x19\xd5j\xb5\x95}\x07\x08\x82\x80\xe1p\x88x\
\xfc\x18\xe7\xe8\x08\xa3\xdb\xbd\x04\xeft\x18\xdd\xbdKrx\x88\xe38+\x17\x8fE/\
\x90$\t\xd7u7\x03\x18\x86\x81m\xdbh\x9aV|\xed\xb36\x1d\x8d\x18\x1f\x1f\xa3}\
\xfd5;\xf7\xee!\xfd\xfc\xf3\xe5\xca\xdc\xba\x857\x9f3S\x14tIZ\xabxM\xd3\xb0m\
{e\xab\xd6j`\xd3\x91)=z\x84\xf3\xe5\x97\x94\x1f>D\x1b\x0c~\x0f>\x18p\xed\xfe\
}\x82\xf1\x98\xe0\x9dw\xf0^}u\xed\xfc8\x8eW5\x10\x86a\xd1$\xfa\xfd>\xaa\xaa\
\xae\x15\x1e@\xeb\xa7\x9fx\xe9\xc1\x03v\x8e\x8f\x91\x9fi\xcb\xcaxL\xed\xe1C$\
\xcf\xe3\x17\xc7\xa1\xf7\x87\xcb\xec\xc2\xd24\xa5\xdf\xef\x13\x04A\xe1\xdb\
\xfa\xbf\xe0\xab\x0f\xde\xcfo\x9e\x9da\xff\xf0\x03\xc6U\x1d\x08ww9\xbfs\x87\
\xe3\xeb\xd7y\xeb\x7f\xff\xff{\xff\x8c\x1e\xdd\xbe\x8dqp@\xe9\xd7_\xc9\xaf\
\x00\xbcz\x9d\xee\xdd\xbb<\xaa\xd7\xb7\r\xb7\xfd\n\xfc\xd5\xf6\xc2\x9b\xd1o\
\xd1r.\xaf\xfe\x90\x016\x00\x00\x00\x00IEND\xaeB`\x82\x8a\x1a\x9f\x99' )
def getErrMailBitmap():
return wx.BitmapFromImage(getErrMailImage())
def getErrMailImage():
stream = cStringIO.StringIO(getErrMailData())
return wx.ImageFromStream(stream)
def getErrMailIcon():
icon = wx.EmptyIcon()
icon.CopyFromBitmap(getErrMailBitmap())
return icon
| doudz/checkfeedmail | icon.py | Python | bsd-2-clause | 9,632 |
import flask; from flask import request
import os
import urllib.parse
from voussoirkit import flasktools
from voussoirkit import gentools
from voussoirkit import stringtools
import etiquette
from .. import common
site = common.site
session_manager = common.session_manager
# Individual albums ################################################################################
@site.route('/album/<album_id>')
def get_album_html(album_id):
album = common.P_album(album_id, response_type='html')
response = common.render_template(
request,
'album.html',
album=album,
view=request.args.get('view', 'grid'),
)
return response
@site.route('/album/<album_id>.json')
def get_album_json(album_id):
album = common.P_album(album_id, response_type='json')
album = album.jsonify()
return flasktools.json_response(album)
@site.route('/album/<album_id>.zip')
def get_album_zip(album_id):
album = common.P_album(album_id, response_type='html')
recursive = request.args.get('recursive', True)
recursive = stringtools.truthystring(recursive)
streamed_zip = etiquette.helpers.zip_album(album, recursive=recursive)
if album.title:
download_as = f'album {album.id} - {album.title}.zip'
else:
download_as = f'album {album.id}.zip'
download_as = etiquette.helpers.remove_path_badchars(download_as)
download_as = urllib.parse.quote(download_as)
outgoing_headers = {
'Content-Type': 'application/octet-stream',
'Content-Disposition': f'attachment; filename*=UTF-8\'\'{download_as}',
}
return flask.Response(streamed_zip, headers=outgoing_headers)
@site.route('/album/<album_id>/add_child', methods=['POST'])
@flasktools.required_fields(['child_id'], forbid_whitespace=True)
def post_album_add_child(album_id):
album = common.P_album(album_id, response_type='json')
child_ids = stringtools.comma_space_split(request.form['child_id'])
children = list(common.P_albums(child_ids, response_type='json'))
album.add_children(children, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
@site.route('/album/<album_id>/remove_child', methods=['POST'])
@flasktools.required_fields(['child_id'], forbid_whitespace=True)
def post_album_remove_child(album_id):
album = common.P_album(album_id, response_type='json')
child_ids = stringtools.comma_space_split(request.form['child_id'])
children = list(common.P_albums(child_ids, response_type='json'))
album.remove_children(children, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
@site.route('/album/<album_id>/remove_thumbnail_photo', methods=['POST'])
def post_album_remove_thumbnail_photo(album_id):
album = common.P_album(album_id, response_type='json')
album.set_thumbnail_photo(None)
common.P.commit(message='album remove thumbnail photo endpoint')
return flasktools.json_response(album.jsonify())
@site.route('/album/<album_id>/refresh_directories', methods=['POST'])
def post_album_refresh_directories(album_id):
album = common.P_album(album_id, response_type='json')
for directory in album.get_associated_directories():
if not directory.is_dir:
continue
digest = common.P.digest_directory(directory, new_photo_ratelimit=0.1)
gentools.run(digest)
common.P.commit(message='refresh album directories endpoint')
return flasktools.json_response({})
@site.route('/album/<album_id>/set_thumbnail_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_set_thumbnail_photo(album_id):
album = common.P_album(album_id, response_type='json')
photo = common.P_photo(request.form['photo_id'], response_type='json')
album.set_thumbnail_photo(photo)
common.P.commit(message='album set thumbnail photo endpoint')
return flasktools.json_response(album.jsonify())
# Album photo operations ###########################################################################
@site.route('/album/<album_id>/add_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_add_photo(album_id):
'''
Add a photo or photos to this album.
'''
album = common.P_album(album_id, response_type='json')
photo_ids = stringtools.comma_space_split(request.form['photo_id'])
photos = list(common.P_photos(photo_ids, response_type='json'))
album.add_photos(photos, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
@site.route('/album/<album_id>/remove_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_remove_photo(album_id):
'''
Remove a photo or photos from this album.
'''
album = common.P_album(album_id, response_type='json')
photo_ids = stringtools.comma_space_split(request.form['photo_id'])
photos = list(common.P_photos(photo_ids, response_type='json'))
album.remove_photos(photos, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
# Album tag operations #############################################################################
@site.route('/album/<album_id>/add_tag', methods=['POST'])
def post_album_add_tag(album_id):
'''
Apply a tag to every photo in the album.
'''
response = {}
album = common.P_album(album_id, response_type='json')
tag = request.form['tagname'].strip()
try:
tag = common.P_tag(tag, response_type='json')
except etiquette.exceptions.NoSuchTag as exc:
response = exc.jsonify()
return flasktools.json_response(response, status=404)
recursive = request.form.get('recursive', False)
recursive = stringtools.truthystring(recursive)
album.add_tag_to_all(tag, nested_children=recursive, commit=True)
response['action'] = 'add_tag'
response['tagname'] = tag.name
return flasktools.json_response(response)
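# Illustrative request (the host, album id, and tag name are hypothetical):
#
#   curl -X POST http://localhost:5000/album/a1b2c3/add_tag \
#        -d tagname=favorites -d recursive=true
#
# On success this responds with {"action": "add_tag", "tagname": "favorites"};
# an unknown tagname yields the 404 JSON error produced above.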
# Album metadata operations ########################################################################
@site.route('/album/<album_id>/edit', methods=['POST'])
def post_album_edit(album_id):
'''
Edit the title / description.
'''
album = common.P_album(album_id, response_type='json')
title = request.form.get('title', None)
description = request.form.get('description', None)
album.edit(title=title, description=description, commit=True)
response = album.jsonify(minimal=True)
return flasktools.json_response(response)
@site.route('/album/<album_id>/show_in_folder', methods=['POST'])
def post_album_show_in_folder(album_id):
if not request.is_localhost:
flask.abort(403)
album = common.P_album(album_id, response_type='json')
directories = album.get_associated_directories()
if len(directories) != 1:
flask.abort(400)
directory = directories.pop()
if os.name == 'nt':
command = f'start explorer.exe "{directory.absolute_path}"'
os.system(command)
return flasktools.json_response({})
flask.abort(501)
# Album listings ###################################################################################
@site.route('/all_albums.json')
@flasktools.cached_endpoint(max_age=15)
def get_all_album_names():
all_albums = {album.id: album.display_name for album in common.P.get_albums()}
response = {'albums': all_albums}
return flasktools.json_response(response)
def get_albums_core():
albums = list(common.P.get_root_albums())
albums.sort(key=lambda x: x.display_name.lower())
return albums
@site.route('/albums')
def get_albums_html():
albums = get_albums_core()
response = common.render_template(
request,
'album.html',
albums=albums,
view=request.args.get('view', 'grid'),
)
return response
@site.route('/albums.json')
def get_albums_json():
albums = get_albums_core()
albums = [album.jsonify(minimal=True) for album in albums]
return flasktools.json_response(albums)
# Album create and delete ##########################################################################
@site.route('/albums/create_album', methods=['POST'])
def post_albums_create():
title = request.form.get('title', None)
description = request.form.get('description', None)
parent_id = request.form.get('parent_id', None)
if parent_id is not None:
parent = common.P_album(parent_id, response_type='json')
user = session_manager.get(request).user
album = common.P.new_album(title=title, description=description, author=user)
if parent_id is not None:
parent.add_child(album)
common.P.commit('create album endpoint')
response = album.jsonify(minimal=False)
return flasktools.json_response(response)
@site.route('/album/<album_id>/delete', methods=['POST'])
def post_album_delete(album_id):
album = common.P_album(album_id, response_type='json')
album.delete(commit=True)
return flasktools.json_response({})
| voussoir/etiquette | frontends/etiquette_flask/backend/endpoints/album_endpoints.py | Python | bsd-2-clause | 9,106 |
#!/usr/bin/env python
import sys
def ip2str(ip):
l = [
(ip >> (3*8)) & 0xFF,
(ip >> (2*8)) & 0xFF,
(ip >> (1*8)) & 0xFF,
(ip >> (0*8)) & 0xFF,
]
return '.'.join([str(i) for i in l])
def str2ip(line):
a, b, c, d = [int(s) for s in line.split('.')]
ip = 0
ip += (a << (3*8))
ip += (b << (2*8))
ip += (c << (1*8))
ip += (d << (0*8))
return ip
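# Illustrative round trip (not executed):
#
#   str2ip('192.168.1.1')  -> 3232235777  (0xC0A80101)
#   ip2str(3232235777)     -> '192.168.1.1'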
blockip = str2ip(sys.stdin.readline())
hostmask = 1
bitcount = 1
for line in sys.stdin.readlines():
try:
ip = str2ip(line.strip())
except:
print 'Ignored line:', line,
continue
while (blockip & (~hostmask)) != (ip & (~hostmask)):
hostmask = (hostmask << 1) | 1
bitcount += 1
print ip2str(blockip & (~hostmask)) + '/' + str(bitcount), 'hostmask =', ip2str(hostmask)
print 'wrong way around'
| owalch/oliver | linux/config/scripts/ipblock.py | Python | bsd-2-clause | 870 |
"""
Workaround for a conda-build bug where failing to compile some Python files
results in a build failure.
See https://github.com/conda/conda-build/issues/1001
"""
import os
import sys
py2_only_files = []
py3_only_files = [
'numba/tests/annotation_usecases.py',
]
def remove_files(basedir):
"""
Remove unwanted files from the current source tree
"""
if sys.version_info >= (3,):
removelist = py2_only_files
msg = "Python 2-only file"
else:
removelist = py3_only_files
msg = "Python 3-only file"
for relpath in removelist:
path = os.path.join(basedir, relpath)
print("Removing %s %r" % (msg, relpath))
os.remove(path)
if __name__ == "__main__":
remove_files('.')
| stefanseefeld/numba | buildscripts/remove_unwanted_files.py | Python | bsd-2-clause | 764 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import random
import sys
class DayLife:
"""Life in a day."""
def __init__(self, date, life):
"""Set birth datetime and life."""
self.birthdate = date
self.life = life
finalyear = self.birthdate.year + self.life
finaldate = datetime.datetime(finalyear, self.birthdate.month,
self.birthdate.day)
self.finaldate = finaldate - datetime.timedelta(days=1)
def now(self):
"""Calculate current time."""
curdate = datetime.datetime.now()
maxdays = (self.finaldate - self.birthdate).days
curdays = (curdate - self.birthdate).days
curtime = datetime.timedelta(days=1) / maxdays
curtime = curtime * curdays
return datetime.time(
(curtime.seconds / 60) / 60,
(curtime.seconds / 60) % 60,
curtime.seconds % 60)
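# Illustrative note (not in the original): now() maps the fraction of the life
# elapsed onto a 24-hour clock, so halfway through yields roughly 12:00:00.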
if __name__ == '__main__':
# options
startyear = 1900
endyear = 2000
life = 200
print startyear, "<= a <=", endyear
print "n =", life
daycount = (datetime.datetime(endyear, 12, 31) -
datetime.datetime(startyear, 1, 1)).days
birthdate = datetime.datetime(startyear, 1, 1) + \
datetime.timedelta(days=random.randint(0, daycount))
args = sys.argv
if len(args) == 4:
year = int(args[1])
month = int(args[2])
date = int(args[3])
birthdate = datetime.datetime(year, month, date)
print "birthdate:", birthdate.date()
mylife = DayLife(birthdate, life)
print "finaldate:", mylife.finaldate.date()
print "today:", mylife.now()
| wakamori/GoForIt | 1/1-2.py | Python | bsd-2-clause | 1,719 |
# -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
    This class instantiates the modules, takes care of the modules'
    versions and gets the modules' select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
                # appends the module with its version to the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the stored version entry
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
        Returns the modules' select queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
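# Hypothetical usage sketch (the db object must expose select_data, insert_data
# and update_data, as _check_module_versions assumes):
#
#     scheduler = Scheduler(db)
#     select_queries = scheduler.get_module_select_queries()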
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
| andreas-kowasch/DomainSearch | DomainSearchViewer/additional/Scheduler.py | Python | bsd-2-clause | 3,767 |
from django.conf.urls import patterns, include, url
from django.shortcuts import redirect, render_to_response
from django.template.context import RequestContext
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Just redirect / to /blog for now until I can
# come up with something to put on the homepage..
def to_blog(request):
return redirect('/blog/', permanent=False)
# Follow the BSD license and allow the source/binary to reproduce
# the license and copyright message
def sslicense(request):
slicense = """
Copyright (c) 2012-2013 Justin Crawford <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE
"""
ctx = {
'parts': {
"title": "License",
"html_title": "License",
"fragment": slicense.replace('\n', '<br>'),
},
}
return render_to_response('docs/docs.html', RequestContext(request, ctx))
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'StackSmash.views.home', name='home'),
# url(r'^StackSmash/', include('StackSmash.foo.urls')),
# TODO: Fix index and use something... Should identify subdomains somehow..
#url(r'^$', include('StackSmash.apps.blog.urls')),
url(r'^license/', sslicense, name='license'),
#url(r'^docs/', include('StackSmash.apps.docs.urls'), name='docs', app_name='docs'),
url(r'^blog/', include('StackSmash.apps.blog.urls', namespace='blog')),
url(r'^projects/', include('StackSmash.apps.projects.urls', namespace='projects')),
url(r'^upload/', include('StackSmash.apps.uploader.urls', namespace='upload')),
url(r'^$', to_blog, name='index'),
#url(r'^projects/', include('StackSmash.apps.projects.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls), name='admin'),
)
| Justasic/StackSmash | StackSmash/urls.py | Python | bsd-2-clause | 3,146 |
import sys
try:
    from collections.abc import Iterator  # Python >= 3.3
except ImportError:
    from collections import Iterator  # Python 2
class GeocoderResult(Iterator):
"""
A geocoder resultset to iterate through address results.
    Example:
results = Geocoder.geocode('paris, us')
for result in results:
print(result.formatted_address, result.location)
    Provides shortcuts to ease field retrieval, looking at 'types' in each
    of the 'address_components'.
Example:
result.country
result.postal_code
You can also choose a different property to display for each lookup type.
Example:
result.country__short_name
By default, use 'long_name' property of lookup type, so:
result.country
and:
result.country__long_name
are equivalent.
"""
attribute_mapping = {
"state": "administrative_area_level_1",
"province": "administrative_area_level_1",
"city": "locality",
"county": "administrative_area_level_2",
}
def __init__(self, data):
"""
Creates instance of GeocoderResult from the provided JSON data array
"""
self.data = data
self.len = len(self.data)
self.current_index = 0
self.current_data = self.data[0]
def __len__(self):
return self.len
def __iter__(self):
return self
def return_next(self):
if self.current_index >= self.len:
raise StopIteration
self.current_data = self.data[self.current_index]
self.current_index += 1
return self
def __getitem__(self, key):
"""
Accessing GeocoderResult by index will return a GeocoderResult
with just one data entry
"""
return GeocoderResult([self.data[key]])
def __unicode__(self):
return self.formatted_address
if sys.version_info[0] >= 3: # Python 3
def __str__(self):
return self.__unicode__()
def __next__(self):
return self.return_next()
else: # Python 2
def __str__(self):
return self.__unicode__().encode('utf8')
def next(self):
return self.return_next()
@property
def count(self):
return self.len
@property
def coordinates(self):
"""
Return a (latitude, longitude) coordinate pair of the current result
"""
location = self.current_data['geometry']['location']
return location['lat'], location['lng']
@property
def latitude(self):
return self.coordinates[0]
@property
def longitude(self):
return self.coordinates[1]
@property
def raw(self):
"""
Returns the full result set in dictionary format
"""
return self.data
@property
def valid_address(self):
"""
Returns true if queried address is valid street address
"""
return self.current_data['types'] == ['street_address']
@property
def formatted_address(self):
return self.current_data['formatted_address']
def __getattr__(self, name):
lookup = name.split('__')
attribute = lookup[0]
if (attribute in GeocoderResult.attribute_mapping):
attribute = GeocoderResult.attribute_mapping[attribute]
try:
prop = lookup[1]
except IndexError:
prop = 'long_name'
for elem in self.current_data['address_components']:
if attribute in elem['types']:
return elem[prop]
class GeocoderError(Exception):
"""Base class for errors in the :mod:`pygeocoder` module.
Methods of the :class:`Geocoder` raise this when something goes wrong.
"""
#: See http://code.google.com/apis/maps/documentation/geocoding/index.html#StatusCodes
#: for information on the meaning of these status codes.
G_GEO_OK = "OK"
G_GEO_ZERO_RESULTS = "ZERO_RESULTS"
G_GEO_OVER_QUERY_LIMIT = "OVER_QUERY_LIMIT"
G_GEO_REQUEST_DENIED = "REQUEST_DENIED"
G_GEO_MISSING_QUERY = "INVALID_REQUEST"
def __init__(self, status, url=None, response=None):
"""Create an exception with a status and optional full response.
:param status: Either a ``G_GEO_`` code or a string explaining the
exception.
:type status: int or string
:param url: The query URL that resulted in the error, if any.
:type url: string
:param response: The actual response returned from Google, if any.
:type response: dict
"""
Exception.__init__(self, status) # Exception is an old-school class
self.status = status
self.url = url
self.response = response
def __str__(self):
"""Return a string representation of this :exc:`GeocoderError`."""
return 'Error %s\nQuery: %s' % (self.status, self.url)
def __unicode__(self):
"""Return a unicode representation of this :exc:`GeocoderError`."""
return unicode(self.__str__())
| zoeren/pygeocoder | pygeolib.py | Python | bsd-3-clause | 4,972 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_carousel import __version__
INSTALL_REQUIRES = [
]
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-carousel',
version=__version__,
description='Slider Plugin for django CMS',
author='Andrew Mirsky',
author_email='[email protected]',
url='https://git.mirsky.net/mirskyconsulting/djangocms-carousel',
packages=['djangocms_carousel', 'djangocms_carousel.migrations'],
install_requires=INSTALL_REQUIRES,
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
| mirskytech/djangocms-carousel | setup.py | Python | bsd-3-clause | 1,241 |
from scout.parse.variant.rank_score import parse_rank_score
from scout.parse.variant.variant import parse_variant
def test_parse_rank_score():
## GIVEN a rank score string on genmod format
rank_scores_info = "123:10"
variant_score = 10.0
family_id = "123"
## WHEN parsing the rank score
parsed_rank_score = parse_rank_score(rank_scores_info, family_id)
## THEN assert that the correct rank score is parsed
assert variant_score == parsed_rank_score
def test_parse_rank_score_no_score():
    ## GIVEN an empty rank score string
rank_scores_info = ""
family_id = "123"
## WHEN parsing the rank score
parsed_rank_score = parse_rank_score(rank_scores_info, family_id)
## THEN assert that None is returned
    assert parsed_rank_score is None
def test_parse_rank_score_variant(cyvcf2_variant, case_obj, scout_config):
## GIVEN a variant
rank_score = 15
case_id = case_obj["_id"]
## WHEN adding a rank score string to the INFO field
rank_score_str = f"{case_id}:{rank_score}"
cyvcf2_variant.INFO["RankScore"] = rank_score_str
## WHEN parsing the variant
var_info = parse_variant(cyvcf2_variant, case_obj)
## THEN assert that the correct score is parsed
assert var_info["rank_score"] == rank_score
| Clinical-Genomics/scout | tests/parse/test_parse_rank_score.py | Python | bsd-3-clause | 1,288 |
import amitgroup as ag
import numpy as np
ag.set_verbose(True)
# This requires you to have the MNIST data set.
data, digits = ag.io.load_mnist('training', selection=slice(0, 100))
pd = ag.features.PartsDescriptor((5, 5), 20, patch_frame=1, edges_threshold=5, samples_per_image=10)
# Use only 100 of the digits
pd.train_from_images(data)
# Save the model to a file.
#pd.save('parts_model.npy')
# You can then load it again by
#pd = ag.features.PartsDescriptor.load(filename)
# Then you can extract features by
#features = pd.extract_features(image)
# Visualize the parts
ag.plot.images(pd.visparts)
| amitgroup/amitgroup | examples/parts_descriptor_test.py | Python | bsd-3-clause | 608 |
from django.contrib.gis.geoip2 import GeoIP2
from geoip2.errors import GeoIP2Error
from ipware import get_client_ip
def get_location_from_ip(request):
client_ip, is_routable = get_client_ip(request)
if client_ip is not None:
g = GeoIP2()
try:
record = g.city(client_ip)
except GeoIP2Error:
return None
if record:
city = record.get('city') or ''
country = record.get('country') or ''
            delimiter = ', ' if city and country else ''
            return f'{city}{delimiter}{country}'
return None
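# Illustrative behaviour (assumes a configured GeoIP2 city database): a routable
# client IP yields e.g. 'Paris, France'; lookup failures return None.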
| richardcornish/smsweather | emojiweather/utils/utils.py | Python | bsd-3-clause | 596 |
# BinGrep, version 1.0.0
# Copyright 2017 Hiroki Hada
# coding:UTF-8
import sys, os, time, argparse
import re
import pprint
#import pydot
import math
import cPickle
import ged_node
from idautils import *
from idc import *
import idaapi
def idascript_exit(code=0):
idc.Exit(code)
def get_short_function_name(function):
return function.replace("?", "")[:100]
def mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def cPickle_dump(filename, data):
with open(filename, "wb") as f:
cPickle.dump(data, f)
def print_cfg(cfg):
for block in cfg:
print "[%02d]" % block.id,
print hex(block.startEA),
succs = list(block.succs())
print "(succs(%d): " % len(succs),
for i in range(len(succs)):
sys.stdout.write(hex(succs[i].startEA))
if i < len(succs) - 1:
sys.stdout.write(", ")
print ")"
def output_cfg_as_png_rec(g, block, memo):
functions1, dummy = get_marks(block, 0)
hashed_label1 = hash_label(functions1)
label1 = hex(block.startEA) + ("\n%08x" % hashed_label1)
g.add_node(pydot.Node(label1, fontcolor='#FFFFFF', color='#333399'))
for b in list(block.succs()):
functions2, dummy = get_marks(b, 0)
hashed_label2 = hash_label(functions2)
label2 = hex(b.startEA) + ("\n%08x" % hashed_label2)
if b.startEA not in memo:
memo.append(b.startEA)
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold'))
output_cfg_as_png_rec(g, b, memo)
else:
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold, dotted'))
def output_cfg_as_png(cfg, filename, overwrite_flag):
blocks_src = {}
blocks_dst = {}
block = cfg[0]
f_name = GetFunctionName(block.startEA)
if not overwrite_flag and os.path.exists(filename):
return
g = pydot.Dot(graph_type='digraph', bgcolor="#F0E0FF")
size = "21"
g.set_rankdir('TB')
g.set_size(size)
g.add_node(pydot.Node('node', shape='ellipse', margin='0.05', fontcolor='#FFFFFF', fontsize=size, color='#333399', style='filled', fontname='Consolas Bold'))
g.add_node(pydot.Node('edge', color='lightgrey'))
memo = []
output_cfg_as_png_rec(g, block, memo)
g.write_png(filename)
def get_cfg(function_start, function_end):
f_name = GetFunctionName(function_start)
cfg = idaapi.FlowChart(idaapi.get_func(function_start))
return list(cfg)
def get_cfgs():
cfgs = []
for ea in Segments():
functions = list(Functions(SegStart(ea), SegEnd(ea)))
functions.append(SegEnd(ea))
for i in range(len(functions) - 1):
function_start = functions[i]
function_end = functions[i+1]
cfg = get_cfg(function_start, function_end)
cfgs.append(cfg)
return cfgs
def hash_label(marks):
tmp = sorted(set(marks))
tmp = "".join(tmp)
tmp = tmp.upper()
def rot13(string):
return reduce(lambda h,c: ((h>>13 | h<<19)+ord(c)) & 0xFFFFFFFF, [0]+list(string))
hashed_label = rot13(tmp)
hashed_label = hashed_label & 0xFFFFFFFF
return hashed_label
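# Illustrative property (not from the original source): marks are deduplicated
# and sorted before hashing, so hash_label(['B', 'A', 'A']) == hash_label(['A', 'B']).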
def get_marks(block, gamma):
marks = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
if mnem not in ["call"]:
for buf in (opnd[1], opnd[2]):
if buf:
match = re.search("([\dA-F]+)h", buf)
if match:
magic = int(match.group(1), 16)
if 0x00001000 <= magic <= 0xffffffff:
marks.append(hex(magic))
for buf in (opnd[0], opnd[1], opnd[2]):
if buf:
match = re.search("offset (a[\S]+)", buf)
if match:
offset_a = match.group(1)
if offset_a[:4] == "asc_": continue
marks.append(offset_a)
continue
else:
gamma += 1
if opnd[0][:4] == "sub_": continue
if opnd[0][0] in ["?", "$"]: continue
if opnd[0] in ["eax", "ebx", "ecx", "edx", "esi", "edi"]: continue
if opnd[0] in ["__SEH_prolog4", "__SEH_epilog4", "__EH_prolog3_catch"]: continue
if opnd[0].find("cookie") >= 0: continue
marks.append(opnd[0])
continue
return marks, gamma
def get_mnems(block):
mnems = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
buf = " "
for o in opnd:
if not o: break
elif o in ["eax", "ebx", "ecx", "edx", "ax", "bx", "cx", "dx", "al", "bl", "cl", "dl", "ah", "bh", "ch", "dh", "esi", "edi", "si", "di", "esp", "ebp"]:
buf += "reg "
elif o[:3] == "xmm": buf += "reg "
elif o.find("[") >= 0: buf += "mem "
elif o[:6] == "offset": buf += "off "
elif o[:4] == "loc_": buf += "loc "
elif o[:4] == "sub_": buf += "sub "
elif o.isdigit(): buf += "num "
elif re.match("[\da-fA-F]+h", o): buf += "num "
elif o[:6] == "dword_": buf += "dwd "
else: buf += "lbl "
mnems.append(mnem + buf)
return mnems
def cfg_to_cft_rec(block, memo, abr):
(alpha, beta, gamma) = abr
alpha += 1
marks, gamma = get_marks(block, gamma)
hashed_label = hash_label(marks)
mnems = get_mnems(block)
tree = ged_node.Node(hashed_label)
for b in list(block.succs()):
beta += 1
if b.startEA not in memo:
memo.append(b.startEA)
tmp, (alpha, beta, gamma), tmp2 = cfg_to_cft_rec(b, memo, (alpha, beta, gamma))
tree = tree.addkid(tmp)
mnems += tmp2
return tree, (alpha, beta, gamma), mnems
def cfg_to_cft(cfg):
block = cfg[0]
memo = []
memo.append(block.startEA)
return cfg_to_cft_rec(block, memo, (0, 0, 0))
def dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite):
function_num = len(cfgs)
dump_data_list = {}
for cfg in cfgs:
function_name = GetFunctionName(cfg[0].startEA)
(cft, abr, mnems) = cfg_to_cft(cfg)
dump_data_list[function_name] = {}
dump_data_list[function_name]["FUNCTION_NAME"] = function_name
dump_data_list[function_name]["CFT"] = cft
dump_data_list[function_name]["ABR"] = abr
dump_data_list[function_name]["MNEMS"] = mnems
def dump_pickle(dump_data_list, program, function, f_overwrite):
function_name_short = get_short_function_name(function)
filename_pickle = os.path.join(function_name_short + ".pickle")
if f_overwrite or not os.path.exists(filename_pickle):
cPickle_dump(filename_pickle, dump_data_list[function])
cPickle_dump(program + ".dmp", dump_data_list)
def main(function, f_image, f_all, f_overwrite):
sys.setrecursionlimit(3000)
program = idaapi.get_root_filename()
start_time = time.time()
cfgs = get_cfgs()
dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite)
result_time = time.time() - start_time
print "Dump finished."
print "result_time: " + str(result_time) + " sec."
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="")
parser.add_argument('-f', dest='function', default=None, type=str, help='')
parser.add_argument('-a', dest='f_all', default=False, action='store_true', help='')
parser.add_argument('-i', dest='f_image', default=False, action='store_true', help='Image Flag (Output as PNG)')
parser.add_argument('-o', dest='f_overwrite', default=False, action='store_true', help='Overwrite file')
args = parser.parse_args()
function = args.function
f_image = args.f_image
f_all = args.f_all
f_overwrite = args.f_overwrite
main(function, f_image, f_all, f_overwrite)
#idascript_exit()
| hada2/bingrep | bingrep_dump.py | Python | bsd-3-clause | 8,471 |
# -*- python -*-
# stdlib imports ---
import os
import os.path as osp
import textwrap
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(opt):
opt.load('hwaf-base', tooldir=_heptooldir)
opt.add_option(
'--with-cmake',
default=None,
help="Look for CMake at the given path")
return
def configure(conf):
conf.load('hwaf-base', tooldir=_heptooldir)
return
@conf
def find_cmake(ctx, **kwargs):
if not ctx.env.HWAF_FOUND_C_COMPILER:
ctx.fatal('load a C compiler first')
pass
if not ctx.env.HWAF_FOUND_CXX_COMPILER:
ctx.fatal('load a C++ compiler first')
pass
path_list = waflib.Utils.to_list(kwargs.get('path_list', []))
if getattr(ctx.options, 'with_cmake', None):
topdir = ctx.options.with_cmake
topdir = ctx.hwaf_subst_vars(topdir)
path_list.append(osp.join(topdir, "bin"))
pass
kwargs['path_list'] = path_list
ctx.find_program(
"cmake",
var="CMAKE",
**kwargs)
kwargs['mandatory'] = False
ctx.find_program(
"ccmake",
var="CCMAKE",
**kwargs)
ctx.find_program(
"cpack",
var="CPACK",
**kwargs)
ctx.find_program(
"ctest",
var="CTEST",
**kwargs)
version="N/A"
cmd = [ctx.env.CMAKE, "--version"]
lines=ctx.cmd_and_log(cmd).splitlines()
for l in lines:
l = l.lower()
if "version" in l:
version=l[l.find("version")+len("version"):].strip()
break
pass
ctx.start_msg("CMake version")
ctx.end_msg(version)
ctx.hwaf_declare_runtime_env('CMAKE')
ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE))
ctx.env.CMAKE_VERSION = version
ctx.env.HWAF_FOUND_CMAKE = 1
return
## EOF ##
| hwaf/hwaf | py-hwaftools/find_cmake.py | Python | bsd-3-clause | 1,951 |
# ----------------------------------------------------------------------------
# Copyright (c) 2008 Andrew D. Straw and Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# Based on pygxinput originally by Andrew D. Straw
# http://code.astraw.com/projects/motmot/wiki/pygxinput
import ctypes
import pyglet
from pyglet.window.xlib import xlib
import lib_xinput as xi
class XInputDevice:
def __init__(self, display, device_info):
self._x_display = display._display
self._device_id = device_info.id
self.name = device_info.name
self._open_device = None
# TODO: retrieve inputclassinfo from device_info and expose / save
# for valuator axes etc.
def open(self):
if self._open_device:
return
self._open_device = xi.XOpenDevice(self._x_display, self._device_id)
if not self._open_device:
raise Exception('Cannot open device')
def close(self):
if not self._open_device:
return
xi.XCloseDevice(self._x_display, self._open_device)
def attach(self, window):
assert window._x_display == self._x_display
return XInputDeviceInstance(self, window)
class XInputDeviceInstance(pyglet.event.EventDispatcher):
def __init__(self, device, window):
"""Create an opened instance of a device on the given window.
:Parameters:
`device` : XInputDevice
Device to open
`window` : Window
Window to open device on
"""
assert device._x_display == window._x_display
assert device._open_device
self.device = device
self.window = window
self._events = list()
try:
dispatcher = window.__xinput_window_event_dispatcher
except AttributeError:
dispatcher = window.__xinput_window_event_dispatcher = \
XInputWindowEventDispatcher()
dispatcher.add_instance(self)
device = device._open_device.contents
if not device.num_classes:
return
# Bind matching extended window events to bound instance methods
# on this object.
#
# This is inspired by test.c of xinput package by Frederic
# Lepied available at x.org.
#
# In C, this stuff is normally handled by the macro DeviceKeyPress and
# friends. Since we don't have access to those macros here, we do it
# this way.
for i in range(device.num_classes):
class_info = device.classes[i]
if class_info.input_class == xi.KeyClass:
self._add(class_info, xi._deviceKeyPress,
dispatcher._event_xinput_key_press)
self._add(class_info, xi._deviceKeyRelease,
dispatcher._event_xinput_key_release)
elif class_info.input_class == xi.ButtonClass:
self._add(class_info, xi._deviceButtonPress,
dispatcher._event_xinput_button_press)
self._add(class_info, xi._deviceButtonRelease,
dispatcher._event_xinput_button_release)
elif class_info.input_class == xi.ValuatorClass:
self._add(class_info, xi._deviceMotionNotify,
dispatcher._event_xinput_motion)
elif class_info.input_class == xi.ProximityClass:
self._add(class_info, xi._proximityIn,
dispatcher._event_xinput_proximity_in)
self._add(class_info, xi._proximityOut,
dispatcher._event_xinput_proximity_out)
elif class_info.input_class == xi.FeedbackClass:
pass
elif class_info.input_class == xi.FocusClass:
pass
elif class_info.input_class == xi.OtherClass:
pass
array = (xi.XEventClass * len(self._events))(*self._events)
xi.XSelectExtensionEvent(window._x_display,
window._window,
array,
len(array))
def _add(self, class_info, event, handler):
_type = class_info.event_type_base + event
_class = self.device._device_id << 8 | _type
self._events.append(_class)
self.window._event_handlers[_type] = handler
XInputDeviceInstance.register_event_type('on_button_press')
XInputDeviceInstance.register_event_type('on_button_release')
XInputDeviceInstance.register_event_type('on_motion')
XInputDeviceInstance.register_event_type('on_proximity_in')
XInputDeviceInstance.register_event_type('on_proximity_out')
class XInputWindowEventDispatcher:
def __init__(self):
self._instances = dict()
def add_instance(self, instance):
self._instances[instance.device._device_id] = instance
def remove_instance(self, instance):
del self._instances[instance.device._device_id]
def dispatch_instance_event(self, e, *args):
try:
instance = self._instances[e.deviceid]
except KeyError:
return
instance.dispatch_event(*args)
@pyglet.window.xlib.XlibEventHandler(0)
def _event_xinput_key_press(self, ev):
raise NotImplementedError('TODO')
@pyglet.window.xlib.XlibEventHandler(0)
def _event_xinput_key_release(self, ev):
raise NotImplementedError('TODO')
@pyglet.window.xlib.XlibEventHandler(0)
def _event_xinput_button_press(self, ev):
e = ctypes.cast(ctypes.byref(ev),
ctypes.POINTER(xi.XDeviceButtonEvent)).contents
self.dispatch_instance_event(e, 'on_button_press', e.button)
@pyglet.window.xlib.XlibEventHandler(0)
def _event_xinput_button_release(self, ev):
e = ctypes.cast(ctypes.byref(ev),
ctypes.POINTER(xi.XDeviceButtonEvent)).contents
self.dispatch_instance_event(e, 'on_button_release', e.button)
@pyglet.window.xlib.XlibEventHandler(0)
def _event_xinput_motion(self, ev):
e = ctypes.cast(ctypes.byref(ev),
ctypes.POINTER(xi.XDeviceMotionEvent)).contents
axis_data = list()
for i in range(e.axes_count):
axis_data.append(e.axis_data[i])
self.dispatch_instance_event(e, 'on_motion', axis_data, e.x, e.y)
@pyglet.window.xlib.XlibEventHandler(0)
def _event_xinput_proximity_in(self, ev):
e = ctypes.cast(ctypes.byref(ev),
ctypes.POINTER(xi.XProximityNotifyEvent)).contents
self.dispatch_instance_event(e, 'on_proximity_in')
@pyglet.window.xlib.XlibEventHandler(-1)
def _event_xinput_proximity_out(self, ev):
e = ctypes.cast(ctypes.byref(ev),
ctypes.POINTER(xi.XProximityNotifyEvent)).contents
self.dispatch_instance_event(e, 'on_proximity_out')
def _check_extension(display):
major_opcode = ctypes.c_int()
first_event = ctypes.c_int()
first_error = ctypes.c_int()
xlib.XQueryExtension(display._display, 'XInputExtension',
ctypes.byref(major_opcode),
ctypes.byref(first_event),
ctypes.byref(first_error))
if not major_opcode.value:
raise Exception('XInput extension not available')
def get_devices(display):
_check_extension(display)
devices = list()
count = ctypes.c_int(0)
device_list = xi.XListInputDevices(display._display, count)
for i in range(count.value):
device_info = device_list[i]
devices.append(XInputDevice(display, device_info))
return devices
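# Hypothetical usage sketch (display must be a pyglet Xlib display object;
# 'on_motion' is one of the instance events registered above):
#
#     for device in get_devices(display):
#         device.open()
#         instance = device.attach(window)
#         instance.set_handler('on_motion', my_motion_handler)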
| bitcraft/pyglet | contrib/experimental/input/xinput.py | Python | bsd-3-clause | 9,260 |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from wtforms import validators
from ..forms import ModelForm
from digits import utils
class ImageModelForm(ModelForm):
"""
Defines the form used to create a new ImageModelJob
"""
crop_size = utils.forms.IntegerField(
'Crop Size',
validators=[
validators.NumberRange(min=1),
validators.Optional()
],
tooltip=("If specified, during training a random square crop will be "
"taken from the input image before using as input for the network.")
)
use_mean = utils.forms.SelectField(
'Subtract Mean',
choices=[
('none', 'None'),
('image', 'Image'),
('pixel', 'Pixel'),
],
default='image',
tooltip="Subtract the mean file or mean pixel for this dataset from each image."
)
aug_flip = utils.forms.SelectField(
'Flipping',
choices=[
('none', 'None'),
('fliplr', 'Horizontal'),
('flipud', 'Vertical'),
('fliplrud', 'Horizontal and/or Vertical'),
],
default='none',
tooltip="Randomly flips each image during batch preprocessing."
)
aug_quad_rot = utils.forms.SelectField(
'Quadrilateral Rotation',
choices=[
('none', 'None'),
('rot90', '0, 90 or 270 degrees'),
('rot180', '0 or 180 degrees'),
            ('rotall', '0, 90, 180 or 270 degrees'),
],
default='none',
tooltip="Randomly rotates (90 degree steps) each image during batch preprocessing."
)
aug_rot = utils.forms.IntegerField(
'Rotation (+- deg)',
default=0,
validators=[
validators.NumberRange(min=0, max=180)
],
tooltip="The uniform-random rotation angle that will be performed during batch preprocessing."
)
aug_scale = utils.forms.FloatField(
'Rescale (stddev)',
default=0,
validators=[
validators.NumberRange(min=0, max=1)
],
tooltip=("Retaining image size, the image is rescaled with a "
"+-stddev of this parameter. Suggested value is 0.07.")
)
aug_noise = utils.forms.FloatField(
'Noise (stddev)',
default=0,
validators=[
validators.NumberRange(min=0, max=1)
],
tooltip=("Adds AWGN (Additive White Gaussian Noise) during batch "
"preprocessing, assuming [0 1] pixel-value range. Suggested value is 0.03.")
)
aug_hsv_use = utils.forms.BooleanField(
'HSV Shifting',
default=False,
tooltip=("Augmentation by normal-distributed random shifts in HSV "
"color space, assuming [0 1] pixel-value range."),
)
aug_hsv_h = utils.forms.FloatField(
'Hue',
default=0.02,
validators=[
validators.NumberRange(min=0, max=0.5)
],
tooltip=("Standard deviation of a shift that will be performed during "
"preprocessing, assuming [0 1] pixel-value range.")
)
aug_hsv_s = utils.forms.FloatField(
'Saturation',
default=0.04,
validators=[
validators.NumberRange(min=0, max=0.5)
],
tooltip=("Standard deviation of a shift that will be performed during "
"preprocessing, assuming [0 1] pixel-value range.")
)
aug_hsv_v = utils.forms.FloatField(
'Value',
default=0.06,
validators=[
validators.NumberRange(min=0, max=0.5)
],
tooltip=("Standard deviation of a shift that will be performed during "
"preprocessing, assuming [0 1] pixel-value range.")
)
| Deepomatic/DIGITS | digits/model/images/forms.py | Python | bsd-3-clause | 3,851 |
from setuptools import setup, find_packages
import os
# The version of the wrapped library is the starting point for the
# version number of the python package.
# In bugfix releases of the python package, add a '-' suffix and an
# incrementing integer.
# For example, a packaging bugfix release version 1.4.4 of the
# js.jquery package would be version 1.4.4-1.
version = '0.9.7rt'
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = (
read('README.txt')
+ '\n' +
read('js', 'chosen', 'test_chosen.txt')
+ '\n' +
read('CHANGES.txt'))
setup(
name='js.chosen',
version=version,
description="Fanstatic packaging of Chosen",
long_description=long_description,
classifiers=[],
keywords='',
author='Fanstatic Developers',
author_email='[email protected]',
license='BSD',
    packages=find_packages(), namespace_packages=['js'],
include_package_data=True,
zip_safe=False,
install_requires=[
'fanstatic',
'js.jquery',
'setuptools',
],
entry_points={
'fanstatic.libraries': [
'chosen = js.chosen:library',
],
},
)
| mmariani/js.chosen | setup.py | Python | bsd-3-clause | 1,226 |
"""myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| greven/vagrant-django | project_name/urls.py | Python | bsd-3-clause | 993 |
__author__ = 'Cedric Da Costa Faro'
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@main.app_errorhandler(405)
def method_not_allowed(e):
return render_template('405.html'), 405
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
| cdcf/time_tracker | app/main/errors.py | Python | bsd-3-clause | 392 |
"""
PostgreSQL Session API
======================
The Session classes wrap the Queries :py:class:`Session <queries.Session>` and
:py:class:`TornadoSession <queries.tornado_session.TornadoSession>` classes
providing environment variable based configuration.
Environment variables should be set using the ``PGSQL[_DBNAME]`` format
where the value is a PostgreSQL URI.
For PostgreSQL URI format, see:
http://www.postgresql.org/docs/9.3/static/libpq-connect.html#LIBPQ-CONNSTRING
As example, given the environment variable:
.. code:: python
PGSQL_FOO = 'postgresql://bar:baz@foohost:6000/foo'
and code for creating a :py:class:`Session` instance for the database name
``foo``:
.. code:: python
session = sprockets.postgresql.Session('foo')
A :py:class:`queries.Session` object will be created that connects to Postgres
running on ``foohost``, port ``6000`` using the username ``bar`` and the
password ``baz``, connecting to the ``foo`` database.
"""
version_info = (2, 0, 1)
__version__ = '.'.join(str(v) for v in version_info)
import logging
import os
from queries import pool
import queries
from queries import tornado_session
_ARGUMENTS = ['host', 'port', 'dbname', 'user', 'password']
LOGGER = logging.getLogger(__name__)
# For ease of access to different cursor types
from queries import DictCursor
from queries import NamedTupleCursor
from queries import RealDictCursor
from queries import LoggingCursor
from queries import MinTimeLoggingCursor
# Expose exceptions so clients do not need to import queries as well
from queries import DataError
from queries import DatabaseError
from queries import IntegrityError
from queries import InterfaceError
from queries import InternalError
from queries import NotSupportedError
from queries import OperationalError
from queries import ProgrammingError
from queries import QueryCanceledError
from queries import TransactionRollbackError
def _get_uri(dbname):
"""Return the URI for the specified database name from an environment
variable. If dbname is blank, the ``PGSQL`` environment variable is used,
otherwise the database name is cast to upper case and concatenated to
``PGSQL_`` and the URI is retrieved from ``PGSQL_DBNAME``. For example,
if the value ``foo`` is passed in, the environment variable used would be
``PGSQL_FOO``.
:param str dbname: The database name to construct the URI for
:return: str
:raises: KeyError
"""
if not dbname:
return os.environ['PGSQL']
return os.environ['PGSQL_{0}'.format(dbname).upper()]
class Session(queries.Session):
"""Extends queries.Session using configuration data that is stored
in environment variables.
Utilizes connection pooling to ensure that multiple concurrent asynchronous
queries do not block each other. Heavily trafficked services will require
a higher ``max_pool_size`` to allow for greater connection concurrency.
:param str dbname: PostgreSQL database name
:param queries.cursor: The cursor type to use
:param int pool_idle_ttl: How long idle pools keep connections open
:param int pool_max_size: The maximum size of the pool to use
:param str db_url: Optional database connection URL. Use this when
you need to connect to a database that is only known at runtime.
"""
def __init__(self, dbname,
cursor_factory=queries.RealDictCursor,
pool_idle_ttl=pool.DEFAULT_IDLE_TTL,
pool_max_size=pool.DEFAULT_MAX_SIZE,
db_url=None):
if db_url is None:
db_url = _get_uri(dbname)
super(Session, self).__init__(db_url,
cursor_factory,
pool_idle_ttl,
pool_max_size)
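# Hypothetical usage (assumes PGSQL_FOO is exported as in the module docstring;
# rows are dicts because RealDictCursor is the default cursor factory):
#
#     session = Session('foo')
#     for row in session.query('SELECT 1 AS x'):
#         print(row['x'])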
class TornadoSession(tornado_session.TornadoSession):
"""Extends queries.TornadoSession using configuration data that is stored
in environment variables.
Utilizes connection pooling to ensure that multiple concurrent asynchronous
queries do not block each other. Heavily trafficked services will require
a higher ``max_pool_size`` to allow for greater connection concurrency.
:py:meth:`query <queries.tornado_session.TornadoSession.query>` and
:py:meth:`callproc <queries.tornado_session.TornadoSession.callproc>` must
call :py:meth:`Results.free <queries.tornado_session.Results.free>`
:param str dbname: PostgreSQL database name
:param queries.cursor: The cursor type to use
:param int pool_idle_ttl: How long idle pools keep connections open
:param int pool_max_size: The maximum size of the pool to use
:param tornado.ioloop.IOLoop ioloop: Pass in the instance of the tornado
IOLoop you would like to use. Defaults to the global instance.
:param str db_url: Optional database connection URL. Use this when
you need to connect to a database that is only known at runtime.
"""
def __init__(self, dbname,
cursor_factory=queries.RealDictCursor,
pool_idle_ttl=pool.DEFAULT_IDLE_TTL,
pool_max_size=tornado_session.DEFAULT_MAX_POOL_SIZE,
io_loop=None, db_url=None):
if db_url is None:
db_url = _get_uri(dbname)
super(TornadoSession, self).__init__(db_url,
cursor_factory,
pool_idle_ttl,
pool_max_size,
io_loop)
| sprockets/sprockets.clients.postgresql | sprockets/clients/postgresql/__init__.py | Python | bsd-3-clause | 5,585 |
__author__ = 'Robert Meyer'
try:
import scoop
except ImportError:
scoop = None
def scoop_not_functional_check():
if scoop is not None and scoop.IS_RUNNING:
print('SCOOP mode functional!')
return False
else:
print('SCOOP NOT running!')
return True
from pypet.tests.integration.environment_test import EnvironmentTest, ResultSortTest
from pypet.tests.integration.environment_multiproc_test import check_nice
import pypet.pypetconstants as pypetconstants
from pypet.tests.testutils.ioutils import parse_args, run_suite
from pypet.tests.testutils.data import unittest
@unittest.skipIf(scoop_not_functional_check(), 'Only makes sense if scoop is installed and running')
class MultiprocSCOOPNetqueueTest(EnvironmentTest):
tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netqueue', 'scoop'
def set_mode(self):
super(MultiprocSCOOPNetqueueTest, self).set_mode()
self.mode = pypetconstants.WRAP_MODE_NETQUEUE
self.multiproc = True
self.freeze_input = False
self.ncores = 4
self.gc_interval = 3
self.niceness = check_nice(1)
self.use_pool = False
self.use_scoop = True
self.graceful_exit = False
@unittest.skip('Does not work with scoop (fully), because scoop uses main frame.')
def test_niceness(self):
pass
# def test_run(self):
# return super(MultiprocSCOOPLocalTest, self).test_run()
@unittest.skipIf(scoop_not_functional_check(), 'Only makes sense if scoop is installed')
class MultiprocSCOOPSortLocalTest(ResultSortTest):
tags = 'integration', 'hdf5', 'environment', 'multiproc', 'local', 'scoop'
def set_mode(self):
super(MultiprocSCOOPSortLocalTest, self).set_mode()
self.mode = pypetconstants.WRAP_MODE_LOCAL
self.freeze_input = False
self.multiproc = True
self.ncores = 4
self.use_pool = False
self.use_scoop = True
self.graceful_exit = False
@unittest.skip('Does not work with SCOOP')
def test_graceful_exit(self):
pass
@unittest.skipIf(scoop_not_functional_check(), 'Only makes sense if scoop is installed')
class MultiprocFrozenSCOOPLocalTest(EnvironmentTest):
tags = 'integration', 'hdf5', 'environment', 'multiproc', 'local', 'scoop', 'freeze_input'
def set_mode(self):
super(MultiprocFrozenSCOOPLocalTest, self).set_mode()
self.mode = pypetconstants.WRAP_MODE_LOCAL
self.multiproc = True
self.freeze_input = True
self.ncores = 4
self.gc_interval = 3
self.niceness = check_nice(1)
self.use_pool = False
self.use_scoop = True
self.graceful_exit = False
@unittest.skip('Does not work with scoop (fully), because scoop uses main frame.')
def test_niceness(self):
pass
# def test_run(self):
# return super(MultiprocSCOOPLocalTest, self).test_run()
# @unittest.skipIf(scoop is None, 'Only makes sense if scoop is installed')
# class MultiprocFrozenSCOOPSortLocalTest(ResultSortTest):
#
# tags = 'integration', 'hdf5', 'environment', 'multiproc', 'local', 'scoop', 'freeze_input'
#
# def set_mode(self):
# super(MultiprocFrozenSCOOPSortLocalTest, self).set_mode()
# self.mode = pypetconstants.WRAP_MODE_LOCAL
# self.freeze_input = True
# self.multiproc = True
# self.ncores = 4
# self.use_pool = False
# self.use_scoop = True
@unittest.skipIf(scoop_not_functional_check(), 'Only makes sense if scoop is installed')
class MultiprocFrozenSCOOPSortNetlockTest(ResultSortTest):
tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netlock', 'scoop', 'freeze_input'
def set_mode(self):
super(MultiprocFrozenSCOOPSortNetlockTest, self).set_mode()
self.mode = pypetconstants.WRAP_MODE_NETLOCK
self.freeze_input = True
self.multiproc = True
self.ncores = 4
self.use_pool = False
self.use_scoop = True
self.port = (10000, 60000)
self.graceful_exit = False
@unittest.skip('Does not work with SCOOP')
def test_graceful_exit(self):
pass
@unittest.skipIf(scoop_not_functional_check(), 'Only makes sense if scoop is installed')
class MultiprocFrozenSCOOPSortNetqueueTest(ResultSortTest):
tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netqueue', 'scoop', 'freeze_input', 'mehmet'
def set_mode(self):
super(MultiprocFrozenSCOOPSortNetqueueTest, self).set_mode()
self.mode = pypetconstants.WRAP_MODE_NETQUEUE
self.freeze_input = True
self.multiproc = True
self.ncores = 4
self.use_pool = False
self.use_scoop = True
self.graceful_exit = False
#self.port = 'tcp://127.0.0.1:22334'
@unittest.skip('Does not work with SCOOP')
def test_graceful_exit(self):
pass
# @unittest.skipIf(scoop is None, 'Only makes sense if scoop is installed')
# class MultiprocSCOOPNetqueueTest(EnvironmentTest):
#
# tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netqueue', 'scoop'
#
# def set_mode(self):
# super(MultiprocSCOOPNetqueueTest, self).set_mode()
# self.mode = pypetconstants.WRAP_MODE_NETQUEUE
# self.multiproc = True
# self.freeze_input = False
# self.ncores = 4
# self.gc_interval = 3
# self.niceness = check_nice(1)
# self.use_pool = False
# self.use_scoop = True
# self.port = None
# self.timeout = 9999.99
@unittest.skipIf(scoop_not_functional_check(), 'Only makes sense if scoop is installed')
class MultiprocSCOOPNetlockTest(EnvironmentTest):
tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netlock', 'scoop'
def set_mode(self):
super(MultiprocSCOOPNetlockTest, self).set_mode()
self.mode = pypetconstants.WRAP_MODE_NETLOCK
self.multiproc = True
self.freeze_input = False
self.ncores = 4
self.gc_interval = 3
self.niceness = check_nice(1)
self.use_pool = False
self.use_scoop = True
self.port = None
self.timeout = 1099.99
self.graceful_exit = False
# self.port = 'tcp://127.0.0.1:22334'
@unittest.skip('Does not work with scoop (fully), because scoop uses main frame.')
def test_niceness(self):
pass
if __name__ == '__main__':
opt_args = parse_args()
run_suite(**opt_args)
| SmokinCaterpillar/pypet | pypet/tests/integration/environment_scoop_test.py | Python | bsd-3-clause | 6,504 |
"""
These URL patterns are included in two different ways in the main urls.py, with
an extra argument present in one case. Thus, there are two different ways for
each name to resolve and Django must distinguish the possibilities based on the
argument list.
"""
from django.conf.urls import url
from .views import empty_view
urlpatterns = [
url(r'^part/(?P<value>\w+)/$', empty_view, name="part"),
url(r'^part2/(?:(?P<value>\w+)/)?$', empty_view, name="part2"),
]
| yephper/django | tests/urlpatterns_reverse/included_urls2.py | Python | bsd-3-clause | 489 |
#!/usr/bin/env python
"""
New Drawing class to create new mark and style on axes.
"""
# from copy import deepcopy, copy
from decimal import Decimal
import numpy as np
import toyplot
# from .Admixture import AdmixEdges
# for setting values from iterables
ITERABLE = (list, tuple, np.ndarray)
class GridSetup:
"""
Returns Canvas and Cartesian axes objects to fit a grid of trees.
"""
def __init__(self, nrows, ncols, width, height, layout):
# style args can include height/width, nrows, ncols, shared,...
self.nrows = nrows
self.ncols = ncols
self.width = width
self.height = height
self.layout = layout
# get .canvas and .axes
self.get_tree_dims()
self.get_canvas_and_axes()
def get_canvas_and_axes(self):
"""
Set .canvas and .axes objects
"""
self.canvas = toyplot.Canvas(
height=self.height,
width=self.width,
)
self.axes = [
self.canvas.cartesian(
grid=(self.nrows, self.ncols, i),
padding=10,
margin=25,
)
for i in range(self.nrows * self.ncols)
]
def get_tree_dims(self):
"""
get height and width if not set by user
"""
if self.ncols * self.nrows < 4:
minx = 250
miny = 250
else:
minx = 200
miny = 140
# wider than tall
if self.layout in ("d", "u"):
self.width = (
self.width if self.width
else min(750, minx * self.ncols)
)
self.height = (
self.height if self.height
else min(750, miny * self.nrows)
)
else:
self.height = (
self.height if self.height
else min(750, minx * self.nrows)
)
self.width = (
self.width if self.width
else min(750, miny * self.ncols)
)
class CanvasSetup:
"""
Returns Canvas and Cartesian axes objects
"""
def __init__(self, tree, axes, style):
# args includes axes
self.tree = tree
self.axes = axes
self.style = style
self.canvas = None
self.external_axis = False
# get the longest name for dimension fitting
self.lname = 0
if not all([i is None for i in self.style.tip_labels]):
self.lname = max([len(str(i)) for i in self.style.tip_labels])
# ntips and shape to fit with provided args
self.get_dims_from_tree_size()
# fills canvas and axes
self.get_canvas_and_axes()
# expand the domain/extents for the text
# self.fit_tip_labels()
# ticks for tree and scalebar
self.add_axes_style()
def get_dims_from_tree_size(self):
"""
Calculate reasonable canvas height and width for tree given N tips
"""
if self.style.layout == "c":
radius = max(
[0] + [i for i in [self.style.height, self.style.width] if i])
if not radius:
radius = 400
self.style.width = self.style.height = radius
return
if self.style.layout in ("r", "l"):
# height fit by tree size
if not self.style.height:
self.style.height = max(275, min(1000, 18 * self.tree.ntips))
# width fit by name size
if not self.style.width:
self.style.width = max(250, min(500, 250 + 5 * self.lname))
else:
# height fit by name size
if not self.style.height:
self.style.height = max(250, min(500, 250 + 5 * self.lname))
# width fit by tree size
if not self.style.width:
self.style.width = max(350, min(1000, 18 * self.tree.ntips))
def get_canvas_and_axes(self):
"""
"""
if self.axes is not None:
self.canvas = None
self.external_axis = True
else:
self.canvas = toyplot.Canvas(
height=self.style.height,
width=self.style.width,
)
self.axes = self.canvas.cartesian(
padding=self.style.padding
)
def add_axes_style(self):
"""
"""
# style axes with padding and show axes
self.axes.padding = self.style.padding
if not self.external_axis:
self.axes.show = True
if not self.style.scalebar:
self.axes.show = False
# scalebar
if self.style.scalebar:
if self.style.layout in ("r", "l"):
nticks = max((3, np.floor(self.style.width / 100).astype(int)))
self.axes.y.show = False
self.axes.x.show = True
self.axes.x.ticks.show = True
# generate locations
if self.style.use_edge_lengths:
th = self.tree.treenode.height
else:
th = self.tree.treenode.get_farthest_leaf(True)[1] + 1
if self.style.layout == "r":
top = self.style.xbaseline - th
else:
top = self.style.xbaseline + th
locs = np.linspace(self.style.xbaseline, top, nticks)
# auto-formatter for axes ticks labels
zer = abs(min(0, Decimal(locs[1]).adjusted()))
fmt = "{:." + str(zer) + "f}"
self.axes.x.ticks.locator = toyplot.locator.Explicit(
locations=locs,
labels=[fmt.format(i) for i in np.abs(locs)],
)
elif self.style.layout in ("u", "d"):
nticks = max((3, np.floor(self.style.height / 100).astype(int)))
self.axes.x.show = False
self.axes.y.show = True
self.axes.y.ticks.show = True
# generate locations
if self.style.use_edge_lengths:
th = self.tree.treenode.height
else:
th = self.tree.treenode.get_farthest_leaf(True)[1] + 1
if self.style.layout == "d":
top = self.style.ybaseline + th
else:
top = self.style.ybaseline - th
locs = np.linspace(self.style.ybaseline, top, nticks)
# auto-formatter for axes ticks labels
zer = abs(min(0, Decimal(locs[1]).adjusted()))
fmt = "{:." + str(zer) + "f}"
self.axes.y.ticks.locator = toyplot.locator.Explicit(
locations=locs,
labels=[fmt.format(i) for i in np.abs(locs)],
)
# elif self.style.layout == "d":
# nticks = max((3, np.floor(self.style.height / 100).astype(int)))
# self.axes.x.show = False
# self.axes.y.show = True
# self.axes.y.ticks.show = True
# # generate locations
# locs = np.linspace(0, self.tree.treenode.height, nticks)
# # auto-formatter for axes ticks labels
# zer = abs(min(0, Decimal(locs[1]).adjusted()))
# fmt = "{:." + str(zer) + "f}"
# self.axes.y.ticks.locator = toyplot.locator.Explicit(
# locations=locs,
# labels=[fmt.format(i) for i in np.abs(locs)],
# )
# def fit_tip_labels(self):
# """
# DEPRECATED SINCE V2 since Mark now sets its own extents correctly.
# Modifies display range to ensure tip labels fit. This is a bit hackish
# still. The problem is that the 'extents' range of the rendered text
# is not totally correct. So we add a little buffer here. Should add for
# user to be able to modify this if needed. If not using edge lengths
# then need to use unit length for treeheight.
# """
# # bail on unrooted for now; TODO
# if self.style.layout == "c":
# return
# # if names
# if self.lname:
# # get ratio of names to tree in plot
# ratio = max(self.lname / 10, 0.15)
# # have tree figure make up 85% of plot
# if self.style.use_edge_lengths:
# addon = self.tree.treenode.height
# else:
# addon = self.tree.treenode.get_farthest_leaf(True)[1] + 1
# addon *= ratio
# # modify display for layout
# if self.style.layout == "r":
# self.axes.x.domain.max = (addon / 2.) + self.style.xbaseline
# elif self.style.layout == "l":
# self.axes.x.domain.min = (-addon / 2.) + self.style.xbaseline
# # self.axes.x.domain.min -= self.style.xbaseline
# elif self.style.layout == "d":
# self.axes.y.domain.min = (-addon / 2.) + self.style.ybaseline
# elif self.style.layout == "u":
# self.axes.y.domain.max = (addon / 2.) + self.style.ybaseline
# # print(addon, ratio, self.axes.x.domain.min, self.axes.x.domain.max)
| eaton-lab/toytree | toytree/CanvasSetup.py | Python | bsd-3-clause | 9,455 |
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
import scipy.stats
from collections import defaultdict
def scores_to_probs(scores):
scores = numpy.array(scores)
scores -= scores.max()
probs = numpy.exp(scores, out=scores)
probs /= probs.sum()
return probs
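# Worked example (illustrative): scores [0, log(3)] shift to [-log(3), 0],
# exponentiate to [1/3, 1], and normalize to [0.25, 0.75].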
def score_to_empirical_kl(score, count):
"""
Convert total log score to KL( empirical || model ),
where the empirical pdf is uniform over `count` datapoints.
"""
count = float(count)
return -score / count - numpy.log(count)
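# Worked example (illustrative): a model that matches the uniform empirical pdf
# on count=4 points has score = 4 * log(1/4), so KL = log(4) - log(4) = 0.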
def print_histogram(probs, counts):
WIDTH = 60.0
max_count = max(counts)
print '{: >8} {: >8}'.format('Prob', 'Count')
for prob, count in sorted(zip(probs, counts), reverse=True):
width = int(round(WIDTH * count / max_count))
print '{: >8.3f} {: >8d} {}'.format(prob, count, '-' * width)
def multinomial_goodness_of_fit(
probs,
counts,
total_count,
truncated=False,
plot=False):
"""
Pearson's chi^2 test, on possibly truncated data.
http://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
Returns:
p-value of truncated multinomial sample.
"""
assert len(probs) == len(counts)
assert truncated or total_count == sum(counts)
chi_squared = 0
dof = 0
if plot:
print_histogram(probs, counts)
for p, c in zip(probs, counts):
if p == 1:
return 1 if c == total_count else 0
assert p < 1, 'bad probability: %g' % p
if p > 0:
mean = total_count * p
variance = total_count * p * (1 - p)
assert variance > 1,\
'WARNING goodness of fit is inaccurate; use more samples'
chi_squared += (c - mean) ** 2 / variance
dof += 1
else:
            print('WARNING zero probability in goodness-of-fit test')
if c > 0:
return float('inf')
if not truncated:
dof -= 1
survival = scipy.stats.chi2.sf(chi_squared, dof)
return survival
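# Example (illustrative): a roughly fair three-sided die should yield a large
# p-value.
#
#     probs = [1 / 3.0, 1 / 3.0, 1 / 3.0]
#     counts = [330, 340, 330]
#     p_value = multinomial_goodness_of_fit(probs, counts, total_count=1000)
#     assert p_value > 0.05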
def unif01_goodness_of_fit(samples, plot=False):
"""
Bin uniformly distributed samples and apply Pearson's chi^2 test.
"""
samples = numpy.array(samples, dtype=float)
assert samples.min() >= 0.0
assert samples.max() <= 1.0
bin_count = int(round(len(samples) ** 0.333))
assert bin_count >= 7, 'WARNING imprecise test, use more samples'
    probs = numpy.ones(bin_count, dtype=float) / bin_count
    counts = numpy.zeros(bin_count, dtype=int)
for sample in samples:
counts[int(bin_count * sample)] += 1
return multinomial_goodness_of_fit(probs, counts, len(samples), plot=plot)
def density_goodness_of_fit(samples, probs, plot=False):
"""
Transform arbitrary continuous samples to unif01 distribution
and assess goodness of fit via Pearson's chi^2 test.
Inputs:
samples - a list of real-valued samples from a distribution
probs - a list of probability densities evaluated at those samples
"""
assert len(samples) == len(probs)
assert len(samples) > 100, 'WARNING imprecision; use more samples'
    pairs = sorted(zip(samples, probs))
samples = numpy.array([x for x, p in pairs])
probs = numpy.array([p for x, p in pairs])
density = numpy.sqrt(probs[1:] * probs[:-1])
gaps = samples[1:] - samples[:-1]
unif01_samples = 1.0 - numpy.exp(-len(samples) * gaps * density)
return unif01_goodness_of_fit(unif01_samples, plot=plot)
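# Example (illustrative): samples drawn from Exp(1), paired with their true
# densities exp(-x), should usually pass the test.
#
#     samples = numpy.random.exponential(scale=1.0, size=1000)
#     probs = numpy.exp(-samples)
#     p_value = density_goodness_of_fit(samples, probs)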
def discrete_goodness_of_fit(
samples,
probs_dict,
truncate_beyond=8,
plot=False):
"""
Transform arbitrary discrete data to multinomial
and assess goodness of fit via Pearson's chi^2 test.
"""
assert len(samples) > 100, 'WARNING imprecision; use more samples'
    counts = defaultdict(int)
for sample in samples:
assert sample in probs_dict
counts[sample] += 1
    items = [(prob, counts.get(i, 0)) for i, prob in probs_dict.items()]
items.sort(reverse=True)
truncated = (truncate_beyond and truncate_beyond < len(items))
if truncated:
items = items[:truncate_beyond]
probs = [prob for prob, count in items]
counts = [count for prob, count in items]
return multinomial_goodness_of_fit(
probs,
counts,
len(samples),
truncated=truncated,
plot=plot)
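# Example (illustrative): a biased coin whose model probabilities are given
# as a dict keyed by outcome.
#
#     samples = [0] * 700 + [1] * 300
#     p_value = discrete_goodness_of_fit(samples, {0: 0.7, 1: 0.3})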
def bin_samples(samples, k=10, support=[]):
"""
Bins a collection of univariate samples into k bins of equal
fill via the empirical cdf, to be used in goodness of fit testing.
Returns
counts : array k x 1
    bin_ranges : array k x 2
each count is the number of samples in [bin_min, bin_max)
except for the last bin which is [bin_min, bin_max]
list partitioning algorithm adapted from Mark Dickinson:
http://stackoverflow.com/questions/2659900
"""
samples = sorted(samples)
N = len(samples)
q, r = divmod(N, k)
    # We need to distribute the remainder relatively evenly;
    # tests will be inaccurate if we have small bins at the end.
indices = [i * q + min(r, i) for i in range(k + 1)]
bins = [samples[indices[i]: indices[i + 1]] for i in range(k)]
bin_ranges = []
counts = []
for i in range(k):
bin_min = bins[i][0]
try:
bin_max = bins[i + 1][0]
except IndexError:
bin_max = bins[i][-1]
bin_ranges.append([bin_min, bin_max])
counts.append(len(bins[i]))
if support:
bin_ranges[0][0] = support[0]
bin_ranges[-1][1] = support[1]
return numpy.array(counts), numpy.array(bin_ranges)
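# Example (illustrative): ten equal-fill bins over uniform samples, with the
# outer bin edges pinned to the known support.
#
#     samples = numpy.random.uniform(size=1000)
#     counts, bin_ranges = bin_samples(samples, k=10, support=[0.0, 1.0])
#     assert counts.sum() == 1000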
def histogram(samples, bin_count=None):
if bin_count is None:
bin_count = numpy.max(samples) + 1
v = numpy.zeros(bin_count, dtype=int)
for sample in samples:
v[sample] += 1
return v
| forcedotcom/distributions | distributions/util.py | Python | bsd-3-clause | 7,375 |
"""
Read a dictionary from a JSON file,
and add its contents to a Python dictionary.
"""
import json
from instmakelib import rtimport
INSTMAKE_SITE_DIR = "instmakesite"
# These are the supported field names
# ===================================
# The name of the plugin (without ".py") for logging
# usage of instmake
CONFIG_USAGE_LOGGER = "usage-logger"
# The name of the plugin (without ".py") for normalizing
# path names in the clidiff report.
CONFIG_CLIDIFF_NORMPATH = "clidiff-normpath"
def update(caller_config, json_filename):
    # This will propagate any I/O or parse errors to the caller
    with open(json_filename) as fh:
        file_config = json.load(fh)
    assert isinstance(file_config, dict)
caller_config.update(file_config)
def load_site_plugin(name):
"""Import a plugin from the instmakesite directory.
The import can throw exceptions that the caller has to
catch."""
plugin_name = INSTMAKE_SITE_DIR + "." + name
return rtimport.rtimport(plugin_name)
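# Example (illustrative sketch; the config path and plugin lookup below are
# hypothetical):
#
#     config = {}
#     update(config, "/etc/instmake/config.json")
#     if config.get(CONFIG_USAGE_LOGGER):
#         logger = load_site_plugin(config[CONFIG_USAGE_LOGGER])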
| gilramir/instmake | instmakelib/jsonconfig.py | Python | bsd-3-clause | 999 |
''' Renderers for various kinds of annotations that can be added to
Bokeh plots
'''
from __future__ import absolute_import
from six import string_types
from ..core.enums import (AngleUnits, Dimension, FontStyle, LegendClickPolicy, LegendLocation,
Orientation, RenderMode, SpatialUnits, VerticalAlign, TextAlign)
from ..core.has_props import abstract
from ..core.properties import (Angle, AngleSpec, Auto, Bool, ColorSpec, Datetime, Dict, DistanceSpec, Either,
Enum, Float, FontSizeSpec, Include, Instance, Int, List, NumberSpec, Override,
Seq, String, StringSpec, Tuple, value)
from ..core.property_mixins import FillProps, LineProps, TextProps
from ..core.validation import error
from ..core.validation.errors import BAD_COLUMN_NAME, NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS
from ..model import Model
from ..util.serialization import convert_datetime_type
from .formatters import BasicTickFormatter, TickFormatter
from .mappers import ContinuousColorMapper
from .renderers import GlyphRenderer, Renderer
from .sources import ColumnDataSource, DataSource
from .tickers import BasicTicker, Ticker
@abstract
class Annotation(Renderer):
''' Base class for all annotation models.
'''
plot = Instance(".models.plots.Plot", help="""
The plot to which this annotation is attached.
""")
level = Override(default="annotation")
@abstract
class TextAnnotation(Annotation):
''' Base class for text annotation models such as labels and titles.
'''
class LegendItem(Model):
'''
'''
def __init__(self, *args, **kwargs):
super(LegendItem, self).__init__(*args, **kwargs)
if isinstance(self.label, string_types):
# Allow convenience of setting label as a string
self.label = value(self.label)
label = StringSpec(default=None, help="""
A label for this legend. Can be a string, or a column of a
ColumnDataSource. If ``label`` is a field, then it must
be in the renderers' data_source.
""")
renderers = List(Instance(GlyphRenderer), help="""
A list of the glyph renderers to draw in the legend. If ``label`` is a field,
then all data_sources of renderers must be the same.
""")
@error(NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS)
def _check_data_sources_on_renderers(self):
if self.label and 'field' in self.label:
if len({r.data_source for r in self.renderers}) != 1:
return str(self)
@error(BAD_COLUMN_NAME)
def _check_field_label_on_data_source(self):
if self.label and 'field' in self.label:
if len(self.renderers) < 1:
return str(self)
source = self.renderers[0].data_source
if self.label.get('field') not in source.column_names:
return str(self)
class Legend(Annotation):
''' Render informational legends for a plot.
'''
location = Either(Enum(LegendLocation), Tuple(Float, Float), default="top_right", help="""
The location where the legend should draw itself. It's either one of
``bokeh.core.enums.LegendLocation``'s enumerated values, or a ``(x, y)``
tuple indicating an absolute location absolute location in screen
coordinates (pixels from the bottom-left corner).
""")
orientation = Enum(Orientation, default="vertical", help="""
Whether the legend entries should be placed vertically or horizontally
when they are drawn.
""")
border_props = Include(LineProps, help="""
The %s for the legend border outline.
""")
border_line_color = Override(default="#e5e5e5")
border_line_alpha = Override(default=0.5)
background_props = Include(FillProps, help="""
The %s for the legend background style.
""")
inactive_props = Include(FillProps, help="""
The %s for the legend background style when inactive.
""")
click_policy = Enum(LegendClickPolicy, default="none", help="""
    Defines what happens when a legend's item is clicked.
""")
background_fill_color = Override(default="#ffffff")
background_fill_alpha = Override(default=0.95)
inactive_fill_color = Override(default="white")
inactive_fill_alpha = Override(default=0.9)
label_props = Include(TextProps, help="""
The %s for the legend labels.
""")
label_text_baseline = Override(default='middle')
label_text_font_size = Override(default={'value': '10pt'})
label_standoff = Int(5, help="""
The distance (in pixels) to separate the label from its associated glyph.
""")
label_height = Int(20, help="""
The minimum height (in pixels) of the area that legend labels should occupy.
""")
label_width = Int(20, help="""
The minimum width (in pixels) of the area that legend labels should occupy.
""")
glyph_height = Int(20, help="""
The height (in pixels) that the rendered legend glyph should occupy.
""")
glyph_width = Int(20, help="""
The width (in pixels) that the rendered legend glyph should occupy.
""")
margin = Int(10, help="""
Amount of margin around the legend.
""")
padding = Int(10, help="""
    Amount of padding around the contents of the legend. Only applicable
    when the border is visible, otherwise collapses to 0.
""")
spacing = Int(3, help="""
    Amount of spacing (in pixels) between legend entries.
""")
items = List(Instance(LegendItem), help="""
A list of :class:`~bokeh.model.annotations.LegendItem` instances to be
rendered in the legend.
This can be specified explicitly, for instance:
.. code-block:: python
legend = Legend(items=[
LegendItem(label="sin(x)" , renderers=[r0, r1]),
LegendItem(label="2*sin(x)" , renderers=[r2]),
LegendItem(label="3*sin(x)" , renderers=[r3, r4])
])
But as a convenience, can also be given more compactly as a list of tuples:
.. code-block:: python
legend = Legend(items=[
("sin(x)" , [r0, r1]),
("2*sin(x)" , [r2]),
("3*sin(x)" , [r3, r4])
])
where each tuple is of the form: *(label, renderers)*.
""").accepts(List(Tuple(String, List(Instance(GlyphRenderer)))), lambda items: [LegendItem(label=item[0], renderers=item[1]) for item in items])
class ColorBar(Annotation):
''' Render a color bar based on a color mapper.
'''
location = Either(Enum(LegendLocation), Tuple(Float, Float),
default="top_right", help="""
The location where the color bar should draw itself. It's either one of
``bokeh.core.enums.LegendLocation``'s enumerated values, or a ``(x, y)``
    tuple indicating an absolute location in screen
coordinates (pixels from the bottom-left corner).
.. warning::
If the color bar is placed in a side panel, the location will likely
have to be set to `(0,0)`.
""")
orientation = Enum(Orientation, default="vertical", help="""
Whether the color bar should be oriented vertically or horizontally.
""")
height = Either(Auto, Int(), help="""
The height (in pixels) that the color scale should occupy.
""")
width = Either(Auto, Int(), help="""
The width (in pixels) that the color scale should occupy.
""")
scale_alpha = Float(1.0, help="""
The alpha with which to render the color scale.
""")
title = String(help="""
The title text to render.
""")
title_props = Include(TextProps, help="""
The %s values for the title text.
""")
title_text_font_size = Override(default={'value': "10pt"})
title_text_font_style = Override(default="italic")
title_standoff = Int(2, help="""
The distance (in pixels) to separate the title from the color bar.
""")
ticker = Instance(Ticker, default=lambda: BasicTicker(), help="""
A Ticker to use for computing locations of axis components.
""")
formatter = Instance(TickFormatter, default=lambda: BasicTickFormatter(), help="""
A TickFormatter to use for formatting the visual appearance of ticks.
""")
major_label_overrides = Dict(Either(Float, String), String, default={}, help="""
Provide explicit tick label values for specific tick locations that
override normal formatting.
""")
color_mapper = Instance(ContinuousColorMapper, help="""
A continuous color mapper containing a color palette to render.
.. warning::
If the `low` and `high` attributes of the ColorMapper aren't set, ticks
and tick labels won't be rendered. Additionally, if a LogTicker is
passed to the `ticker` argument and either or both of the logarithms
of `low` and `high` values of the color_mapper are non-numeric
(i.e. `low=0`), the tick and tick labels won't be rendered.
""")
margin = Int(30, help="""
Amount of margin (in pixels) around the outside of the color bar.
""")
padding = Int(10, help="""
Amount of padding (in pixels) between the color scale and color bar border.
""")
major_label_props = Include(TextProps, help="""
The %s of the major tick labels.
""")
major_label_text_align = Override(default="center")
major_label_text_baseline = Override(default="middle")
major_label_text_font_size = Override(default={'value': "8pt"})
label_standoff = Int(5, help="""
The distance (in pixels) to separate the tick labels from the color bar.
""")
major_tick_props = Include(LineProps, help="""
The %s of the major ticks.
""")
major_tick_line_color = Override(default="#ffffff")
major_tick_in = Int(default=5, help="""
The distance (in pixels) that major ticks should extend into the
main plot area.
""")
major_tick_out = Int(default=0, help="""
The distance (in pixels) that major ticks should extend out of the
main plot area.
""")
minor_tick_props = Include(LineProps, help="""
The %s of the minor ticks.
""")
minor_tick_line_color = Override(default=None)
minor_tick_in = Int(default=0, help="""
The distance (in pixels) that minor ticks should extend into the
main plot area.
""")
minor_tick_out = Int(default=0, help="""
The distance (in pixels) that major ticks should extend out of the
main plot area.
""")
bar_props = Include(LineProps, help="""
The %s for the color scale bar outline.
""")
bar_line_color = Override(default=None)
border_props = Include(LineProps, help="""
The %s for the color bar border outline.
""")
border_line_color = Override(default=None)
background_props = Include(FillProps, help="""
The %s for the color bar background style.
""")
background_fill_color = Override(default="#ffffff")
background_fill_alpha = Override(default=0.95)
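# Example (illustrative; assumes a figure ``p`` and that the ``Viridis256``
# palette is available):
#
#     from bokeh.models import LinearColorMapper
#
#     mapper = LinearColorMapper(palette="Viridis256", low=0, high=10)
#     color_bar = ColorBar(color_mapper=mapper, location=(0, 0))
#     p.add_layout(color_bar, 'right')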
# This only exists to prevent a circular import.
def _DEFAULT_ARROW():
from .arrow_heads import OpenHead
return OpenHead()
class Arrow(Annotation):
''' Render an arrow as an annotation.
'''
x_start = NumberSpec(help="""
The x-coordinates to locate the start of the arrows.
""")
y_start = NumberSpec(help="""
The y-coordinates to locate the start of the arrows.
""")
start_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the x_start and y_start attributes. Interpreted as "data
space" units by default.
""")
start = Instance('.models.arrow_heads.ArrowHead', default=None, help="""
Instance of ArrowHead.
""")
x_end = NumberSpec(help="""
The x-coordinates to locate the end of the arrows.
""")
y_end = NumberSpec(help="""
The y-coordinates to locate the end of the arrows.
""")
end_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the x_end and y_end attributes. Interpreted as "data
space" units by default.
""")
end = Instance('.models.arrow_heads.ArrowHead', default=_DEFAULT_ARROW, help="""
Instance of ArrowHead.
""")
body_props = Include(LineProps, use_prefix=False, help="""
The %s values for the arrow body.
""")
source = Instance(DataSource, help="""
Local data source to use when rendering annotations on the plot.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default y-range.
""")
class BoxAnnotation(Annotation):
''' Render a shaded rectangular region as an annotation.
'''
left = Either(Auto, NumberSpec(), default=None, help="""
The x-coordinates of the left edge of the box annotation.
Datetime values are also accepted, but note that they are immediately
converted to milliseconds-since-epoch.
""")
left_units = Enum(SpatialUnits, default='data', help="""
The unit type for the left attribute. Interpreted as "data space" units
by default.
""")
right = Either(Auto, NumberSpec(), default=None, help="""
The x-coordinates of the right edge of the box annotation.
Datetime values are also accepted, but note that they are immediately
converted to milliseconds-since-epoch.
""")
right_units = Enum(SpatialUnits, default='data', help="""
The unit type for the right attribute. Interpreted as "data space" units
by default.
""")
bottom = Either(Auto, NumberSpec(), default=None, help="""
The y-coordinates of the bottom edge of the box annotation.
Datetime values are also accepted, but note that they are immediately
converted to milliseconds-since-epoch.
""")
bottom_units = Enum(SpatialUnits, default='data', help="""
The unit type for the bottom attribute. Interpreted as "data space" units
by default.
""")
top = Either(Auto, NumberSpec(), default=None, help="""
The y-coordinates of the top edge of the box annotation.
Datetime values are also accepted, but note that they are immediately
converted to milliseconds-since-epoch.
""")
top_units = Enum(SpatialUnits, default='data', help="""
The unit type for the top attribute. Interpreted as "data space" units
by default.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen locations when
rendering box annotations on the plot. If unset, use the default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen locations when
rendering box annotations on the plot. If unset, use the default y-range.
""")
line_props = Include(LineProps, use_prefix=False, help="""
The %s values for the box.
""")
line_alpha = Override(default=0.3)
line_color = Override(default="#cccccc")
fill_props = Include(FillProps, use_prefix=False, help="""
The %s values for the box.
""")
fill_alpha = Override(default=0.4)
fill_color = Override(default="#fff9ba")
render_mode = Enum(RenderMode, default="canvas", help="""
    Specifies whether the box is rendered as a canvas element or as a
css element overlaid on the canvas. The default mode is "canvas".
.. warning::
The line_dash and line_dash_offset attributes aren't supported if
the render_mode is set to "css"
""")
class Band(Annotation):
''' Render a filled area band along a dimension.
'''
lower = DistanceSpec(help="""
The coordinates of the lower portion of the filled area band.
""")
upper = DistanceSpec(help="""
    The coordinates of the upper portion of the filled area band.
""")
base = DistanceSpec(help="""
The orthogonal coordinates of the upper and lower values.
""")
dimension = Enum(Dimension, default='height', help="""
The direction of the band.
""")
source = Instance(DataSource, default=lambda: ColumnDataSource(), help="""
Local data source to use when rendering annotations on the plot.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default y-range.
""")
line_props = Include(LineProps, use_prefix=False, help="""
The %s values for the band.
""")
line_alpha = Override(default=0.3)
line_color = Override(default="#cccccc")
fill_props = Include(FillProps, use_prefix=False, help="""
The %s values for the band.
""")
fill_alpha = Override(default=0.4)
fill_color = Override(default="#fff9ba")
class Label(TextAnnotation):
''' Render a single text label as an annotation.
``Label`` will render a single text label at given ``x`` and ``y``
coordinates, which can be in either screen (pixel) space, or data (axis
range) space.
The label can also be configured with a screen space offset from ``x`` and
``y``, by using the ``x_offset`` and ``y_offset`` properties.
Additionally, the label can be rotated with the ``angle`` property.
There are also standard text, fill, and line properties to control the
appearance of the text, its background, as well as the rectangular bounding
box border.
'''
x = Float(help="""
The x-coordinate in screen coordinates to locate the text anchors.
Datetime values are also accepted, but note that they are immediately
converted to milliseconds-since-epoch.
""").accepts(Datetime, convert_datetime_type)
x_units = Enum(SpatialUnits, default='data', help="""
The unit type for the x attribute. Interpreted as "data space" units
by default.
""")
y = Float(help="""
The y-coordinate in screen coordinates to locate the text anchors.
Datetime values are also accepted, but note that they are immediately
converted to milliseconds-since-epoch.
""").accepts(Datetime, convert_datetime_type)
y_units = Enum(SpatialUnits, default='data', help="""
The unit type for the y attribute. Interpreted as "data space" units
by default.
""")
text = String(help="""
The text value to render.
""")
angle = Angle(default=0, help="""
The angle to rotate the text, as measured from the horizontal.
.. warning::
The center of rotation for canvas and css render_modes is different.
For `render_mode="canvas"` the label is rotated from the top-left
corner of the annotation, while for `render_mode="css"` the annotation
    is rotated around its center.
""")
angle_units = Enum(AngleUnits, default='rad', help="""
Acceptable values for units are ``"rad"`` and ``"deg"``
""")
x_offset = Float(default=0, help="""
Offset value to apply to the x-coordinate.
This is useful, for instance, if it is desired to "float" text a fixed
distance in screen units from a given data position.
""")
y_offset = Float(default=0, help="""
Offset value to apply to the y-coordinate.
This is useful, for instance, if it is desired to "float" text a fixed
distance in screen units from a given data position.
""")
# TODO (bev) these should probably not be dataspec properties
text_props = Include(TextProps, use_prefix=False, help="""
The %s values for the text.
""")
# TODO (bev) these should probably not be dataspec properties
background_props = Include(FillProps, use_prefix=True, help="""
The %s values for the text bounding box.
""")
background_fill_color = Override(default=None)
# TODO (bev) these should probably not be dataspec properties
border_props = Include(LineProps, use_prefix=True, help="""
The %s values for the text bounding box.
""")
border_line_color = Override(default=None)
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen location when
rendering an annotation on the plot. If unset, use the default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen location when
rendering an annotation on the plot. If unset, use the default y-range.
""")
render_mode = Enum(RenderMode, default="canvas", help="""
    Specifies whether the text is rendered as a canvas element or as a
css element overlaid on the canvas. The default mode is "canvas".
.. note::
The CSS labels won't be present in the output using the "save" tool.
.. warning::
Not all visual styling properties are supported if the render_mode is
set to "css". The border_line_dash property isn't fully supported and
border_line_dash_offset isn't supported at all. Setting text_alpha will
modify the opacity of the entire background box and border in addition
to the text. Finally, clipping Label annotations inside of the plot
area isn't supported in "css" mode.
""")
class LabelSet(TextAnnotation):
''' Render multiple text labels as annotations.
``LabelSet`` will render multiple text labels at given ``x`` and ``y``
coordinates, which can be in either screen (pixel) space, or data (axis
range) space. In this case (as opposed to the single ``Label`` model),
``x`` and ``y`` can also be the name of a column from a
:class:`~bokeh.models.sources.ColumnDataSource`, in which case the labels
will be "vectorized" using coordinate values from the specified columns.
The label can also be configured with a screen space offset from ``x`` and
``y``, by using the ``x_offset`` and ``y_offset`` properties. These offsets
may be vectorized by giving the name of a data source column.
Additionally, the label can be rotated with the ``angle`` property (which
may also be a column name.)
There are also standard text, fill, and line properties to control the
appearance of the text, its background, as well as the rectangular bounding
box border.
The data source is provided by setting the ``source`` property.
'''
x = NumberSpec(help="""
The x-coordinates to locate the text anchors.
""")
x_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the x attribute. Interpreted as "data space" units
by default.
""")
y = NumberSpec(help="""
The y-coordinates to locate the text anchors.
""")
y_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the y attribute. Interpreted as "data space" units
by default.
""")
text = StringSpec("text", help="""
The text values to render.
""")
angle = AngleSpec(default=0, help="""
The angles to rotate the text, as measured from the horizontal.
.. warning::
The center of rotation for canvas and css render_modes is different.
For `render_mode="canvas"` the label is rotated from the top-left
corner of the annotation, while for `render_mode="css"` the annotation
    is rotated around its center.
""")
x_offset = NumberSpec(default=0, help="""
Offset values to apply to the x-coordinates.
This is useful, for instance, if it is desired to "float" text a fixed
distance in screen units from a given data position.
""")
y_offset = NumberSpec(default=0, help="""
Offset values to apply to the y-coordinates.
This is useful, for instance, if it is desired to "float" text a fixed
distance in screen units from a given data position.
""")
text_props = Include(TextProps, use_prefix=False, help="""
The %s values for the text.
""")
background_props = Include(FillProps, use_prefix=True, help="""
The %s values for the text bounding box.
""")
background_fill_color = Override(default=None)
border_props = Include(LineProps, use_prefix=True, help="""
The %s values for the text bounding box.
""")
border_line_color = Override(default=None)
source = Instance(DataSource, default=lambda: ColumnDataSource(), help="""
Local data source to use when rendering annotations on the plot.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default y-range.
""")
render_mode = Enum(RenderMode, default="canvas", help="""
    Specifies whether the text is rendered as a canvas element or as a
css element overlaid on the canvas. The default mode is "canvas".
.. note::
The CSS labels won't be present in the output using the "save" tool.
.. warning::
Not all visual styling properties are supported if the render_mode is
set to "css". The border_line_dash property isn't fully supported and
border_line_dash_offset isn't supported at all. Setting text_alpha will
modify the opacity of the entire background box and border in addition
to the text. Finally, clipping Label annotations inside of the plot
area isn't supported in "css" mode.
""")
class PolyAnnotation(Annotation):
''' Render a shaded polygonal region as an annotation.
'''
xs = Seq(Float, default=[], help="""
The x-coordinates of the region to draw.
""")
xs_units = Enum(SpatialUnits, default='data', help="""
The unit type for the xs attribute. Interpreted as "data space" units
by default.
""")
ys = Seq(Float, default=[], help="""
The y-coordinates of the region to draw.
""")
ys_units = Enum(SpatialUnits, default='data', help="""
The unit type for the ys attribute. Interpreted as "data space" units
by default.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen locations when
rendering box annotations on the plot. If unset, use the default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen locations when
rendering box annotations on the plot. If unset, use the default y-range.
""")
line_props = Include(LineProps, use_prefix=False, help="""
The %s values for the polygon.
""")
line_alpha = Override(default=0.3)
line_color = Override(default="#cccccc")
fill_props = Include(FillProps, use_prefix=False, help="""
The %s values for the polygon.
""")
fill_alpha = Override(default=0.4)
fill_color = Override(default="#fff9ba")
class Span(Annotation):
""" Render a horizontal or vertical line span.
"""
location = Float(help="""
The location of the span, along ``dimension``.
""")
location_units = Enum(SpatialUnits, default='data', help="""
The unit type for the location attribute. Interpreted as "data space"
units by default.
""")
dimension = Enum(Dimension, default='width', help="""
The direction of the span.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default y-range.
""")
render_mode = Enum(RenderMode, default="canvas", help="""
    Specifies whether the span is rendered as a canvas element or as a
css element overlaid on the canvas. The default mode is "canvas".
.. warning::
The line_dash and line_dash_offset attributes aren't supported if
the render_mode is set to "css"
""")
line_props = Include(LineProps, use_prefix=False, help="""
The %s values for the span.
""")
class Title(TextAnnotation):
''' Render a single title box as an annotation.
'''
text = String(help="""
The text value to render.
""")
vertical_align = Enum(VerticalAlign, default='bottom', help="""
    Alignment of the text in its enclosing space, *across* the direction of the text.
""")
align = Enum(TextAlign, default='left', help="""
    Alignment of the text in its enclosing space, *along* the direction of the text.
""")
offset = Float(default=0, help="""
Offset the text by a number of pixels (can be positive or negative). Shifts the text in
different directions based on the location of the title:
* above: shifts title right
* right: shifts title down
* below: shifts title right
* left: shifts title up
""")
text_font = String(default="helvetica", help="""
Name of a font to use for rendering text, e.g., ``'times'``,
``'helvetica'``.
""")
text_font_size = FontSizeSpec(default=value("10pt"))
text_font_style = Enum(FontStyle, default="bold", help="""
A style to use for rendering text.
Acceptable values are:
- ``'normal'`` normal text
- ``'italic'`` *italic text*
- ``'bold'`` **bold text**
""")
text_color = ColorSpec(default="#444444", help="""
A color to use to fill text with.
Acceptable values are:
- any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'``
- an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'``
- a 3-tuple of integers (r,g,b) between 0 and 255
- a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1
.. _CSS colors: http://www.w3schools.com/cssref/css_colornames.asp
""")
text_alpha = NumberSpec(default=1.0, help="""
An alpha value to use to fill text with.
Acceptable values are floating point numbers between 0 (transparent)
and 1 (opaque).
""")
background_props = Include(FillProps, use_prefix=True, help="""
The %s values for the text bounding box.
""")
background_fill_color = Override(default=None)
border_props = Include(LineProps, use_prefix=True, help="""
The %s values for the text bounding box.
""")
border_line_color = Override(default=None)
render_mode = Enum(RenderMode, default="canvas", help="""
    Specifies whether the text is rendered as a canvas element or as a
css element overlaid on the canvas. The default mode is "canvas".
.. note::
The CSS labels won't be present in the output using the "save" tool.
.. warning::
Not all visual styling properties are supported if the render_mode is
set to "css". The border_line_dash property isn't fully supported and
border_line_dash_offset isn't supported at all. Setting text_alpha will
modify the opacity of the entire background box and border in addition
to the text. Finally, clipping Label annotations inside of the plot
area isn't supported in "css" mode.
""")
class Tooltip(Annotation):
''' Render a tooltip.
.. note::
This model is currently managed by BokehJS and is not useful
directly from python.
'''
level = Override(default="overlay")
attachment = Enum("horizontal", "vertical", "left", "right", "above", "below", help="""
    Whether the tooltip should display to the left or right of the cursor
position or above or below it, or if it should be automatically placed
in the horizontal or vertical dimension.
""")
inner_only = Bool(default=True, help="""
Whether to display outside a central plot frame area.
""")
show_arrow = Bool(default=True, help="""
    Whether the tooltip's arrow should be shown.
""")
# This only exists to prevent a circular import.
def _DEFAULT_TEE():
from .arrow_heads import TeeHead
return TeeHead(level="underlay", size=10)
class Whisker(Annotation):
''' Render a whisker along a dimension.
'''
lower = DistanceSpec(help="""
The coordinates of the lower end of the whiskers.
""")
lower_head = Instance('.models.arrow_heads.ArrowHead', default=_DEFAULT_TEE, help="""
Instance of ArrowHead.
""")
upper = DistanceSpec(help="""
    The coordinates of the upper end of the whiskers.
""")
upper_head = Instance('.models.arrow_heads.ArrowHead', default=_DEFAULT_TEE, help="""
Instance of ArrowHead.
""")
base = DistanceSpec(help="""
The orthogonal coordinates of the upper and lower values.
""")
dimension = Enum(Dimension, default='height', help="""
    The direction of the whisker.
""")
source = Instance(DataSource, default=lambda: ColumnDataSource(), help="""
Local data source to use when rendering annotations on the plot.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen locations when
rendering annotations on the plot. If unset, use the default y-range.
""")
line_props = Include(LineProps, use_prefix=False, help="""
The %s values for the whisker body.
""")
level = Override(default="underlay")
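# Example (illustrative; assumes a figure ``p`` and a ColumnDataSource
# ``source`` with 'base', 'lower' and 'upper' columns):
#
#     whisker = Whisker(source=source, base="base",
#                       lower="lower", upper="upper")
#     p.add_layout(whisker)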
class ToolbarPanel(Annotation): # TODO: this shouldn't be an annotation
toolbar = Instance(".models.tools.Toolbar", help="""
A toolbar to display.
""")
| rs2/bokeh | bokeh/models/annotations.py | Python | bsd-3-clause | 34,179 |
# Copyright (c) 2008, Stefano Taschini <[email protected]>
# All rights reserved.
# See LICENSE for details.
import unittest
from interval import interval, fpu
class FpuTestCase(unittest.TestCase):
def test_third(self):
"Nearest rounding of 1/3 is downwards."
self.assertEqual(1/3.0, fpu.down(lambda: 1.0 / 3.0))
self.assertTrue(1/3.0 < fpu.up(lambda: 1.0 / 3.0))
self.assertEqual(-1/3.0, fpu.up(lambda: 1.0 / -3.0))
self.assertTrue(-1/3.0 > fpu.down(lambda: 1.0 / -3.0))
def test_fourth(self):
" 1/4 is exact."
self.assertEqual(1/4.0, fpu.down(lambda: 1.0 / 4.0))
self.assertEqual(1/4.0, fpu.up(lambda: 1.0 / 4.0))
self.assertEqual(-1/4.0, fpu.up(lambda: 1.0 / -4.0))
self.assertEqual(-1/4.0, fpu.down(lambda: 1.0 / -4.0))
def test_fifth(self):
"Nearest rounding of 1/5 is upwards."
self.assertEqual(1/5.0, fpu.up(lambda: 1.0 / 5.0))
self.assertTrue(1/5.0 > fpu.down(lambda: 1.0 / 5.0))
self.assertEqual(-1/5.0, fpu.down(lambda: 1.0 / -5.0))
self.assertTrue(-1/5.0 < fpu.up(lambda: 1.0 / -5.0))
def test_ieee754(self):
"fpu.float respect ieee754 semantics."
self.assertEqual(fpu.infinity + fpu.infinity, fpu.infinity)
self.assertTrue(fpu.isnan(fpu.nan))
self.assertTrue(fpu.isnan(0.0 * fpu.infinity))
self.assertTrue(fpu.isnan(fpu.infinity - fpu.infinity))
def test_float_coercion(self):
"Only real-number scalars should be able to coerce as fpu.float"
self.assertRaises(Exception, lambda: float(1,2))
self.assertRaises(Exception, lambda: float((1,2)))
self.assertRaises(Exception, lambda: float([1,2]))
self.assertRaises(Exception, lambda: float('a'))
self.assertRaises(Exception, lambda: float(1+1j))
def test_min(self):
"Verify corner cases with nan, -inf, +inf"
self.assertEqual(fpu.min((1.0, 2.0)), 1.0)
self.assertEqual(fpu.min((1.0, fpu.infinity)), 1.0)
self.assertEqual(fpu.min((1.0, -fpu.infinity)), -fpu.infinity)
self.assertTrue(fpu.isnan(fpu.min((1.0, -fpu.nan))))
def test_max(self):
"Verify corner cases with nan, -inf, +inf"
self.assertEqual(fpu.max((1.0, 2.0)), 2.0)
self.assertEqual(fpu.max((1.0, fpu.infinity)), fpu.infinity)
self.assertEqual(fpu.max((1.0, -fpu.infinity)), 1.0)
self.assertTrue(fpu.isnan(fpu.max((1.0, fpu.nan))))
def test_power(self):
x = 1/3.0
# The cube of one third should depend on the rounding mode
self.assertTrue(fpu.down(lambda: x*x*x) < fpu.up(lambda: x*x*x))
        # But with the built-in power operator the rounding mode does not
        # necessarily take effect:
        # print(fpu.down(lambda: x**3) < fpu.up(lambda: x**3))
        # So we define integer power methods that do respect it
self.assertTrue(fpu.power_rd(x, 3) < fpu.power_ru(x, 3))
self.assertTrue(fpu.power_rd(-x, 3) < fpu.power_ru(-x, 3))
self.assertTrue(fpu.power_rd(x, 4) < fpu.power_ru(x, 4))
self.assertTrue(fpu.power_rd(-x, 4) < fpu.power_ru(-x, 4))
self.assertEqual(
(fpu.down(lambda: x*x*x), fpu.up(lambda: x*x*x)),
(fpu.power_rd(x, 3), fpu.power_ru(x, 3)))
class ModuleTestCase(unittest.TestCase):
def test_namespace(self):
import interval
self.assertEqual(
dir(interval),
['__builtins__', '__doc__', '__file__', '__name__', '__path__', 'fpu', 'imath', 'inf', 'interval'])
class IntervalTestCase(unittest.TestCase):
def test_trivial_constructor(self):
self.assertEqual(interval[1], ((1, 1),))
self.assertEqual(interval(1), ((1, 1),))
self.assertEqual(interval[1, 2], ((1, 2),))
self.assertEqual(interval(1, 2), ((1, 1), (2, 2)))
self.assertEqual(interval([1, 2], [3, 4]), ((1, 2), (3, 4)))
self.assertEqual(interval([1,2]), interval(interval([1, 2])))
def test_nan_constructor(self):
self.assertEqual(interval[2, fpu.nan], ((-fpu.infinity, fpu.infinity),))
self.assertEqual(interval[2, fpu.nan], ((-fpu.infinity, fpu.infinity),))
self.assertEqual(interval(2, fpu.nan, 9), ((-fpu.infinity, fpu.infinity),))
def test_failing_constructor(self):
self.assertRaises(interval.ComponentError, lambda: interval[1, [2, 3]])
self.assertRaises(interval.ComponentError, lambda: interval[1, 2, 3])
self.assertRaises(interval.ComponentError, lambda: interval(0, [1, 2, 3]))
self.assertRaises(interval.ComponentError, lambda: interval(0, [1, [2, 3]]))
self.assertRaises(interval.ComponentError, lambda: interval['a', 1])
def test_canonical_constructor(self):
self.assertEqual(interval([1, 3], [4, 6], [2, 5], 9), ((1, 6), (9, 9)))
self.assertEqual(interval[2 ** (52 + 1) - 1], interval[9007199254740991.0])
self.assertEqual(interval[2 ** (52 + 1) + 1], interval[4503599627370496 * 2.0, 4503599627370497 * 2.0])
self.assertEqual(interval[-2 ** (52 + 1) + 1], interval[-9007199254740991.0])
self.assertEqual(interval[-2 ** (52 + 1) - 1], interval[-4503599627370497 * 2.0, -4503599627370496 * 2.0])
self.assertEqual(interval[2 ** (52 + 2) + 1], interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
self.assertEqual(interval[2 ** (52 + 2) + 2], interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
self.assertEqual(interval[2 ** (52 + 2) + 3], interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
self.assertEqual(interval[-2 ** (52 + 2) - 1], interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
self.assertEqual(interval[-2 ** (52 + 2) - 2], interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
self.assertEqual(interval[-2 ** (52 + 2) - 3], interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
def test_unary(self):
self.assertEqual(interval[1, 2], +interval[1, 2])
self.assertEqual(interval[-2, -1], -interval[1, 2])
def test_sum(self):
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[-fpu.infinity] + interval[fpu.infinity])
self.assertEqual(interval[4, 6], interval[1, 2] + interval[3, 4])
self.assertEqual(interval[3, fpu.infinity], interval[1, fpu.infinity] + interval[2])
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[-fpu.infinity, -1] + interval[2, +fpu.infinity])
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[-fpu.infinity] + interval[8, +fpu.infinity])
self.assertEqual(interval([1, 2], [10, fpu.infinity]) + interval([1,9],[-2,-1]), interval([-1, 1], [2, fpu.infinity]))
self.assertEqual(interval[1, 9] + interval([1, 2], [10, fpu.infinity]), interval[2, fpu.infinity])
def test_sum_coercion(self):
self.assertEqual(interval[1,2] + 2, interval[3, 4])
self.assertRaises(TypeError, lambda: interval[1,2] + 1j)
self.assertEqual(1 + interval[4, 5], interval[5, 6])
self.assertRaises(TypeError, lambda: (1, 2) + interval[1,2])
self.assertEqual(fpu.infinity + interval[4, 5], interval[fpu.infinity])
def test_sub(self):
self.assertEqual(interval[1, 2] - interval[3, 4], interval[-3.0, -1.0])
self.assertEqual(interval[1, 2] - 0.5, interval[0.5, 1.5])
self.assertEqual(1.5 - interval[1, 2], interval[-0.5, 0.5])
def test_mul(self):
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], fpu.infinity * interval[0])
self.assertEqual(interval[+fpu.infinity], interval[+fpu.infinity] * interval[3])
self.assertEqual(interval[-8, +10], interval[1, 2] * interval[-4, 5])
self.assertEqual(interval[3, 8], interval[1, 2] * interval[3, 4])
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[0,1 ] * interval[2, +fpu.infinity])
self.assertEqual(interval[2, fpu.infinity], interval[-fpu.infinity,-2] * interval[-fpu.infinity,-1])
self.assertEqual(interval([1, 2], [3, 4]) * interval[0.5, 2], interval[0.5, 8])
self.assertEqual(interval[1, 2] * 2, interval[2, 4])
def test_inverse(self):
self.assertEqual(interval[0.5, 1], interval[1, 2].inverse())
self.assertEqual(interval[-1, -0.5],(-interval[1, 2]).inverse())
self.assertEqual(interval([-fpu.infinity, -1], [0.5, +fpu.infinity]), interval[-1,2].inverse())
self.assertEqual(interval(-fpu.infinity, [1, +fpu.infinity]), interval[0,1].inverse())
self.assertEqual(interval([-fpu.infinity, -2.0], [0.0, fpu.infinity]),
interval([-0.5, 0.5], [0.2, fpu.infinity]).inverse())
def test_division(self):
self.assertEqual(interval[-fpu.infinity, fpu.infinity], interval[0,1] / interval[0,1])
self.assertEqual(interval[0.5], interval[1] / 2)
self.assertEqual(interval[0.5], 1 / interval[2])
def test_power(self):
self.assertRaises(TypeError, lambda: interval[1, 2] ** (1.3))
self.assertEqual((-interval[1, 2]).inverse(), (-interval[1, 2]) ** -1)
self.assertEqual(interval[0, 4], interval[-1, 2] ** 2)
self.assertEqual(interval[-27, 8], interval[-3, 2] ** 3)
self.assertEqual(interval[-1, 2], (interval[-1,2]**-1)**-1)
self.assertEqual(interval([-0.38712442133802405]) ** 3, interval([-0.058016524353106828, -0.058016524353106808]))
self.assertEqual(
interval[fpu.down(lambda: (1/3.0)*(1/3.0)), fpu.up(lambda: (1/3.0)*(1/3.0))],
(interval[1]/3.0) ** 2)
self.assertEqual(
interval[fpu.down(lambda: (1/3.0)*(1/3.0)*(1/3.0)), fpu.up(lambda: (1/3.0)*(1/3.0)*(1/3.0))],
(interval[1]/3.0) ** 3)
def test_format(self):
for x in interval[1], interval[1,2], interval([1,2], [3,4]):
self.assertEqual(x, eval(repr(x)))
def test_intersection(self):
self.assertEqual(interval[1, 2] & interval[0, 3], interval[1, 2])
self.assertEqual(interval[1.1, 1.9] & interval[1.3, 2.5], interval[1.3, 1.9])
self.assertEqual(interval[1.1, 1.9] & interval[0.3, 0.7], interval())
self.assertEqual(interval([1, 3], [4, 5]) & interval[2], interval[2])
self.assertEqual(interval([1, 3], [4, 5]) & interval(2,4.5), interval(2, 4.5))
self.assertEqual(interval[1, 2] & 1.2, interval(1.2))
self.assertEqual(2.1 & interval[1, 2], interval())
def test_union(self):
self.assertEqual(interval([1, 6], 9), interval([1, 3], [4, 6]) | interval([2, 5], 9))
self.assertEqual(interval[1, 2] | 2.1, interval([1, 2], 2.1))
self.assertEqual(2.1 | interval[1, 2], interval([1, 2], 2.1))
def test_hull(self):
self.assertEqual(interval([1, 9]), interval.hull((interval([1, 3], [4, 6]), interval([2, 5], 9))))
def test_inclusion(self):
def verify_in(x, y):
self.assertTrue(x in y)
self.assertEqual(x & y, interval(x))
verify_in(1.5, interval[1, 2])
verify_in(1, interval[1, 2])
verify_in(2, interval[1, 2])
verify_in(interval[1, 2], interval[1, 2])
verify_in(interval[1.1, 2], interval[1, 2])
verify_in(interval[1, 1.8], interval[1, 2])
verify_in(interval([1.1, 2.2], [3.3, 4.4]), interval(-1, [0, 2.5], [3, 5], [7, 9]))
def verify_out(x, y):
self.assertFalse(x in y)
self.assertNotEqual(x & y, x)
verify_out(0, interval[1, 2])
verify_out(4, interval[1, 2])
verify_out(interval[1, 3], interval[2, 4])
verify_out(interval(1, 3), interval(2, 4))
def test_extrema(self):
self.assertEqual(interval(1, [2, 3], 4).extrema, interval(1, 2, 3, 4))
def test_midpoint(self):
self.assertEqual(interval[0, 4].midpoint, interval[2])
self.assertEqual(interval(-1, 1, 4), interval(-1, [0, 2], [3, 5]).midpoint)
class NewtonTestCase(unittest.TestCase):
def test_opts(self):
self.assertRaises(TypeError, lambda: interval(0,1).newton(None, None, nonexisting=True))
def test_cubic(self):
self.assertEqual(
interval[-2, 2].newton(lambda x: x**3 - x, lambda x: 3*x**2-1),
interval(-1, 0, 1))
self.assertEqual(
interval[-5, 5].newton(lambda x: x**3 + x - 10, lambda x: 3*x**2 + 1),
interval[2])
self.assertEqual(
interval[-5, 5].newton(lambda x: x**3 + x - 15, lambda x: 3*x**2 + 1),
interval[5249383869325653 * 2.0 ** -51, 5249383869325655 * 2.0 ** -51])
# The sharpest result would be with 5249383869325654 * 2.0 ** -51 as sup.
def test_sqrt2(self):
import math
f, p = lambda x: x**2 - 2, lambda x: 2 * x
u, v = 6369051672525772 * 2.0 **-52, 6369051672525773 * 2.0 **-52
self.assertEqual(v, math.sqrt(2))
s = interval[u, v]
self.assertEqual(s, interval[0.1, 5].newton(f, p))
self.assertEqual(s, interval[0, 2].newton(f, p))
self.assertEqual(s, interval[-1, 10].newton(f, p))
self.assertEqual(interval(), interval[2, 5].newton(f, p))
self.assertEqual(-s, interval[-5, 0].newton(f, p))
self.assertEqual(-s|s, interval[-5, +5].newton(f, p))
if __name__ == '__main__':
unittest.main()
| abarnert/pyinterval | test/test_basic.py | Python | bsd-3-clause | 13,302 |
import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils import six
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
wkt_writer_get_outdim = GEOSFuncFactory(
'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_char])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
# is not a difference between signed and unsigned characters, so
# it is not a problem.
argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR]
restype = c_int
class WKBWriterSet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])
# ### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
# Loading the real destructor function at this point as doing it in
# __del__ is too late (import error).
self._destructor.func = self._destructor.get_func(
*self._destructor.args, **self._destructor.kwargs
)
def __del__(self):
# Cleaning up with the appropriate destructor.
try:
self._destructor(self._ptr)
except (AttributeError, TypeError):
pass # Some part might already have been garbage collected
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
_destructor = wkt_reader_destroy
ptr_type = WKT_READ_PTR
def read(self, wkt):
if not isinstance(wkt, (bytes, six.string_types)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
_destructor = wkb_reader_destroy
ptr_type = WKB_READ_PTR
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, six.memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, six.string_types)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
_destructor = wkt_writer_destroy
ptr_type = WKT_WRITE_PTR
_trim = False
_precision = None
def __init__(self, dim=2, trim=False, precision=None):
super(WKTWriter, self).__init__()
if bool(trim) != self._trim:
self.trim = trim
if precision is not None:
self.precision = precision
self.outdim = dim
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
@property
def trim(self):
return self._trim
@trim.setter
def trim(self, flag):
if bool(flag) != self._trim:
self._trim = bool(flag)
wkt_writer_set_trim(self.ptr, b'\x01' if flag else b'\x00')
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, precision):
if (not isinstance(precision, int) or precision < 0) and precision is not None:
            raise AttributeError('WKT output rounding precision must be a non-negative integer or None.')
if precision != self._precision:
self._precision = precision
wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
_destructor = wkb_writer_destroy
ptr_type = WKB_WRITE_PTR
def __init__(self, dim=2):
super(WKBWriter, self).__init__()
self.outdim = dim
def _handle_empty_point(self, geom):
from django.contrib.gis.geos import Point
if isinstance(geom, Point) and geom.empty:
if self.srid:
# PostGIS uses POINT(NaN NaN) for WKB representation of empty
# points. Use it for EWKB as it's a PostGIS specific format.
# https://trac.osgeo.org/postgis/ticket/3181
geom = Point(float('NaN'), float('NaN'), srid=geom.srid)
else:
raise ValueError('Empty point is not representable in WKB.')
return geom
def write(self, geom):
"Returns the WKB representation of the given geometry."
from django.contrib.gis.geos import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
# Fix GEOS output for empty polygon.
# See https://trac.osgeo.org/geos/ticket/680.
wkb = wkb[:-8] + b'\0' * 4
return six.memoryview(wkb)
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
from django.contrib.gis.geos.polygon import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
wkb = wkb[:-16] + b'0' * 8
return wkb
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
@property
def outdim(self):
return wkb_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
# Property for getting/setting the include srid flag.
@property
def srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
@srid.setter
def srid(self, include):
if include:
flag = b'\x01'
else:
flag = b'\x00'
wkb_writer_set_include_srid(self.ptr, flag)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2, trim=False, precision=None):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter(dim=dim, trim=trim, precision=precision)
else:
thread_context.wkt_w.outdim = dim
thread_context.wkt_w.trim = trim
thread_context.wkt_w.precision = precision
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter(dim=dim)
else:
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
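# Like wkb_w(), but the returned writer embeds the SRID in its output (EWKB).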
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter(dim=dim)
thread_context.ewkb_w.srid = True
else:
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
| erikr/django | django/contrib/gis/geos/prototypes/io.py | Python | bsd-3-clause | 11,671 |
"""
Downloads the following:
- Korean Wikipedia texts
- Korean
"""
from sqlparse import parsestream
from sqlparse.sql import Parenthesis

with open('data/test.sql') as sql_file:
    for statement in parsestream(sql_file):
        # Pull the text field out of each parenthesized VALUES tuple.
        texts = [str(token.tokens[1].tokens[-1]).decode('string_escape')
                 for token in statement.tokens
                 if isinstance(token, Parenthesis)]
        print texts
        # Drop comment entries that start with '#'.
        texts = [text for text in texts if text and text[0] != '#']
        if texts:
            print "\n===\n".join(texts)
| carpedm20/Bias | scripts/download.py | Python | bsd-3-clause | 449 |
"""
Methods to characterize image textures.
"""
import numpy as np
from ._texture import _glcm_loop, _local_binary_pattern
def greycomatrix(image, distances, angles, levels=256, symmetric=False,
normed=False):
"""Calculate the grey-level co-occurrence matrix.
    A grey level co-occurrence matrix is a histogram of co-occurring
    greyscale values at a given offset over an image.
Parameters
----------
image : array_like of uint8
Integer typed input image. The image will be cast to uint8, so
the maximum value must be less than 256.
distances : array_like
List of pixel pair distance offsets.
angles : array_like
List of pixel pair angles in radians.
levels : int, optional
The input image should contain integers in [0, levels-1],
        where levels indicates the number of grey-levels counted
(typically 256 for an 8-bit image). The maximum value is
256.
symmetric : bool, optional
If True, the output matrix `P[:, :, d, theta]` is symmetric. This
is accomplished by ignoring the order of value pairs, so both
(i, j) and (j, i) are accumulated when (i, j) is encountered
for a given offset. The default is False.
normed : bool, optional
If True, normalize each matrix `P[:, :, d, theta]` by dividing
by the total number of accumulated co-occurrences for the given
offset. The elements of the resulting matrix sum to 1. The
default is False.
Returns
-------
P : 4-D ndarray
The grey-level co-occurrence histogram. The value
`P[i,j,d,theta]` is the number of times that grey-level `j`
occurs at a distance `d` and at an angle `theta` from
grey-level `i`. If `normed` is `False`, the output is of
type uint32, otherwise it is float64.
References
----------
.. [1] The GLCM Tutorial Home Page,
http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
.. [2] Pattern Recognition Engineering, Morton Nadler & Eric P.
Smith
.. [3] Wikipedia, http://en.wikipedia.org/wiki/Co-occurrence_matrix
Examples
--------
Compute 2 GLCMs: One for a 1-pixel offset to the right, and one
for a 1-pixel offset upwards.
>>> image = np.array([[0, 0, 1, 1],
... [0, 0, 1, 1],
... [0, 2, 2, 2],
... [2, 2, 3, 3]], dtype=np.uint8)
>>> result = greycomatrix(image, [1], [0, np.pi/4, np.pi/2, 3*np.pi/4], levels=4)
>>> result[:, :, 0, 0]
array([[2, 2, 1, 0],
[0, 2, 0, 0],
[0, 0, 3, 1],
[0, 0, 0, 1]], dtype=uint32)
>>> result[:, :, 0, 1]
array([[1, 1, 3, 0],
[0, 1, 1, 0],
[0, 0, 0, 2],
[0, 0, 0, 0]], dtype=uint32)
>>> result[:, :, 0, 2]
array([[3, 0, 2, 0],
[0, 2, 2, 0],
[0, 0, 1, 2],
[0, 0, 0, 0]], dtype=uint32)
>>> result[:, :, 0, 3]
array([[2, 0, 0, 0],
[1, 1, 2, 0],
[0, 0, 2, 1],
[0, 0, 0, 0]], dtype=uint32)
"""
assert levels <= 256
image = np.ascontiguousarray(image)
assert image.ndim == 2
assert image.min() >= 0
assert image.max() < levels
image = image.astype(np.uint8)
distances = np.ascontiguousarray(distances, dtype=np.float64)
angles = np.ascontiguousarray(angles, dtype=np.float64)
assert distances.ndim == 1
assert angles.ndim == 1
P = np.zeros((levels, levels, len(distances), len(angles)),
dtype=np.uint32, order='C')
    # count co-occurrences
_glcm_loop(image, distances, angles, levels, P)
    # make each GLCM symmetric
if symmetric:
Pt = np.transpose(P, (1, 0, 2, 3))
P = P + Pt
    # normalize each GLCM
if normed:
P = P.astype(np.float64)
glcm_sums = np.apply_over_axes(np.sum, P, axes=(0, 1))
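        # avoid division by zero for offsets with no accumulated co-occurrences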
glcm_sums[glcm_sums == 0] = 1
P /= glcm_sums
return P
def greycoprops(P, prop='contrast'):
"""Calculate texture properties of a GLCM.
Compute a feature of a grey level co-occurrence matrix to serve as
a compact summary of the matrix. The properties are computed as
follows:
- 'contrast': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}(i-j)^2`
- 'dissimilarity': :math:`\\sum_{i,j=0}^{levels-1}P_{i,j}|i-j|`
- 'homogeneity': :math:`\\sum_{i,j=0}^{levels-1}\\frac{P_{i,j}}{1+(i-j)^2}`
- 'ASM': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}^2`
- 'energy': :math:`\\sqrt{ASM}`
- 'correlation':
.. math:: \\sum_{i,j=0}^{levels-1} P_{i,j}\\left[\\frac{(i-\\mu_i) \\
(j-\\mu_j)}{\\sqrt{(\\sigma_i^2)(\\sigma_j^2)}}\\right]
Parameters
----------
P : ndarray
Input array. `P` is the grey-level co-occurrence histogram
for which to compute the specified property. The value
`P[i,j,d,theta]` is the number of times that grey-level j
occurs at a distance d and at an angle theta from
grey-level i.
prop : {'contrast', 'dissimilarity', 'homogeneity', 'energy', \
'correlation', 'ASM'}, optional
The property of the GLCM to compute. The default is 'contrast'.
Returns
-------
results : 2-D ndarray
2-dimensional array. `results[d, a]` is the property 'prop' for
the d'th distance and the a'th angle.
References
----------
.. [1] The GLCM Tutorial Home Page,
http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
Examples
--------
Compute the contrast for GLCMs with distances [1, 2] and angles
[0 degrees, 90 degrees]
>>> image = np.array([[0, 0, 1, 1],
... [0, 0, 1, 1],
... [0, 2, 2, 2],
... [2, 2, 3, 3]], dtype=np.uint8)
>>> g = greycomatrix(image, [1, 2], [0, np.pi/2], levels=4,
... normed=True, symmetric=True)
>>> contrast = greycoprops(g, 'contrast')
>>> contrast
array([[ 0.58333333, 1. ],
[ 1.25 , 2.75 ]])
"""
assert P.ndim == 4
(num_level, num_level2, num_dist, num_angle) = P.shape
assert num_level == num_level2
assert num_dist > 0
assert num_angle > 0
# create weights for specified property
I, J = np.ogrid[0:num_level, 0:num_level]
if prop == 'contrast':
weights = (I - J) ** 2
elif prop == 'dissimilarity':
weights = np.abs(I - J)
elif prop == 'homogeneity':
weights = 1. / (1. + (I - J) ** 2)
elif prop in ['ASM', 'energy', 'correlation']:
pass
else:
raise ValueError('%s is an invalid property' % (prop))
# compute property for each GLCM
if prop == 'energy':
asm = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
results = np.sqrt(asm)
elif prop == 'ASM':
results = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
elif prop == 'correlation':
results = np.zeros((num_dist, num_angle), dtype=np.float64)
I = np.array(range(num_level)).reshape((num_level, 1, 1, 1))
J = np.array(range(num_level)).reshape((1, num_level, 1, 1))
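        # center the grey levels on the per-GLCM means mu_i and mu_j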
diff_i = I - np.apply_over_axes(np.sum, (I * P), axes=(0, 1))[0, 0]
diff_j = J - np.apply_over_axes(np.sum, (J * P), axes=(0, 1))[0, 0]
std_i = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_i) ** 2),
axes=(0, 1))[0, 0])
std_j = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_j) ** 2),
axes=(0, 1))[0, 0])
cov = np.apply_over_axes(np.sum, (P * (diff_i * diff_j)),
axes=(0, 1))[0, 0]
# handle the special case of standard deviations near zero
mask_0 = std_i < 1e-15
mask_0[std_j < 1e-15] = True
results[mask_0] = 1
# handle the standard case
        mask_1 = ~mask_0
results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1])
elif prop in ['contrast', 'dissimilarity', 'homogeneity']:
weights = weights.reshape((num_level, num_level, 1, 1))
results = np.apply_over_axes(np.sum, (P * weights), axes=(0, 1))[0, 0]
return results
def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
method : {'default', 'ror', 'uniform', 'var'}
Method to determine the pattern.
* 'default': original local binary pattern which is gray scale but not
rotation invariant.
* 'ror': extension of default implementation which is gray scale and
rotation invariant.
* 'uniform': improved rotation invariance with uniform patterns and
finer quantization of the angular space which is gray scale and
rotation invariant.
* 'nri_uniform': non rotation-invariant uniform patterns variant
which is only gray scale invariant [2].
* 'var': rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.rafbis.it/biplab15/images/stories/docenti/Danielriccio/\
Articoliriferimento/LBP.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
| SamHames/scikit-image | skimage/feature/texture.py | Python | bsd-3-clause | 10,468 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'mptt',
'cms',
'menus',
'djangocms_inherit',
'south',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
TEMPLATE_CONTEXT_PROCESSORS = [
'django.core.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'cms.context_processors.media',
'sekizai.context_processors.sekizai',
]
ROOT_URLCONF = 'cms.urls'
def schemamigration():
    # turn ``schemamigration.py --initial`` into
    # ``manage.py schemamigration djangocms_inherit --initial`` and set up the
    # environment
from django.conf import settings
from django.core.management import ManagementUtility
settings.configure(
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF=ROOT_URLCONF,
DATABASES=DATABASES,
TEMPLATE_CONTEXT_PROCESSORS=TEMPLATE_CONTEXT_PROCESSORS
)
argv = list(sys.argv)
argv.insert(1, 'schemamigration')
argv.insert(2, 'djangocms_inherit')
utility = ManagementUtility(argv)
utility.execute()
if __name__ == "__main__":
schemamigration()
| divio/djangocms-inherit | schemamigration.py | Python | bsd-3-clause | 1,455 |
# Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from ...models import GPModel
import numpy as np
class CostModel(object):
"""
Class to handle the cost of evaluating the function.
param cost_withGradients: function that returns the cost of evaluating the function and its gradient. By default
no cost is used. Options are:
- cost_withGradients is some pre-defined cost function. Should return numpy array as outputs.
- cost_withGradients = 'evaluation_time'.
    .. Note:: if cost_withGradients = 'evaluation_time' the evaluation time of the function is used to fit a GP whose
    exponentiated mean is used as the cost.
"""
def __init__(self, cost_withGradients):
super(CostModel, self).__init__()
self.cost_type = cost_withGradients
# --- Set-up evaluation cost
if self.cost_type is None:
self.cost_withGradients = constant_cost_withGradients
self.cost_type = 'Constant cost'
elif self.cost_type == 'evaluation_time':
self.cost_model = GPModel()
self.cost_withGradients = self._cost_gp_withGradients
self.num_updates = 0
else:
self.cost_withGradients = cost_withGradients
self.cost_type = 'User defined cost'
def _cost_gp(self,x):
"""
Predicts the time cost of evaluating the function at x.
"""
m, _, _, _ = self.cost_model.predict_withGradients(x)
return np.exp(m)
def _cost_gp_withGradients(self,x):
"""
Predicts the time cost and its gradient of evaluating the function at x.
"""
m, _, dmdx, _= self.cost_model.predict_withGradients(x)
return np.exp(m), np.exp(m)*dmdx
def update_cost_model(self, x, cost_x):
"""
Updates the GP used to handle the cost.
param x: input of the GP for the cost model.
        param cost_x: values of the time cost at the input locations.
"""
if self.cost_type == 'evaluation_time':
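            # the cost is modelled in log-space, so exp(GP mean) stays positive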
cost_evals = np.log(np.atleast_2d(np.asarray(cost_x)).T)
if self.num_updates == 0:
X_all = x
costs_all = cost_evals
else:
X_all = np.vstack((self.cost_model.model.X,x))
costs_all = np.vstack((self.cost_model.model.Y,cost_evals))
self.num_updates += 1
self.cost_model.updateModel(X_all, costs_all, None, None)
def constant_cost_withGradients(x):
"""
Constant cost function used by default: cost = 1, d_cost = 0.
"""
return np.ones(x.shape[0])[:,None], np.zeros(x.shape)
| SheffieldML/GPyOpt | GPyOpt/core/task/cost.py | Python | bsd-3-clause | 2,686 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu ([email protected])
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
from selenium import webdriver
import os
import time
import logging
import re
import random
from cameo.utility import Utility
from cameo.localdb import LocalDbForTECHORANGE
"""
抓取 科技報橘 html 存放到 source_html
"""
class SpiderForTECHORANGE:
    # constructor
def __init__(self):
self.SOURCE_HTML_BASE_FOLDER_PATH = u"cameo_res\\source_html"
self.PARSED_RESULT_BASE_FOLDER_PATH = u"cameo_res\\parsed_result"
self.strWebsiteDomain = u"http://buzzorange.com/techorange"
self.dicSubCommandHandler = {
"index":self.downloadIndexPage,
"tag":self.downloadTagPag,
"news":self.downloadNewsPage
}
self.utility = Utility()
self.db = LocalDbForTECHORANGE()
self.driver = None
    # get spider usage information
def getUseageMessage(self):
return ("- TECHORANGE -\n"
"useage:\n"
"index - download entry page of TECHORANGE \n"
"tag - download not obtained tag page \n"
"news [tag] - download not obtained news [of given tag] \n")
    # get a selenium driver object
def getDriver(self):
chromeDriverExeFilePath = "cameo_res\\chromedriver.exe"
driver = webdriver.Chrome(chromeDriverExeFilePath)
return driver
    # initialize the selenium driver object
def initDriver(self):
if self.driver is None:
self.driver = self.getDriver()
    # quit the selenium driver object
def quitDriver(self):
self.driver.quit()
self.driver = None
    # run the spider
def runSpider(self, lstSubcommand=None):
strSubcommand = lstSubcommand[0]
strArg1 = None
if len(lstSubcommand) == 2:
strArg1 = lstSubcommand[1]
self.initDriver() #init selenium driver
self.dicSubCommandHandler[strSubcommand](strArg1)
self.quitDriver() #quit selenium driver
    # download the index page
def downloadIndexPage(self, uselessArg1=None):
logging.info("download index page")
strIndexHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\TECHORANGE"
if not os.path.exists(strIndexHtmlFolderPath):
os.mkdir(strIndexHtmlFolderPath) #mkdir source_html/TECHORANGE/
        # TechOrange home page
        self.driver.get("https://buzzorange.com/techorange/")
        # save html
strIndexHtmlFilePath = strIndexHtmlFolderPath + u"\\index.html"
self.utility.overwriteSaveAs(strFilePath=strIndexHtmlFilePath, unicodeData=self.driver.page_source)
    # download tag pages
def downloadTagPag(self, uselessArg1=None):
logging.info("download tag page")
strTagHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\TECHORANGE\\tag"
if not os.path.exists(strTagHtmlFolderPath):
os.mkdir(strTagHtmlFolderPath) #mkdir source_html/TECHORANGE/tag/
strTagWebsiteDomain = self.strWebsiteDomain + u"/tag"
        # get the names of tags not yet downloaded from the local DB
lstStrNotObtainedTagName = self.db.fetchallNotObtainedTagName()
for strNotObtainedTagName in lstStrNotObtainedTagName:
            # skip tags whose names are too long
if len(strNotObtainedTagName) > 60:
continue
strTagUrl = strTagWebsiteDomain + u"/" + strNotObtainedTagName
            # tag page 0
intPageNum = 0
time.sleep(random.randint(2,5)) #sleep random time
self.driver.get(strTagUrl)
            # save html
strTagHtmlFilePath = strTagHtmlFolderPath + u"\\%d_%s_tag.html"%(intPageNum, strNotObtainedTagName)
self.utility.overwriteSaveAs(strFilePath=strTagHtmlFilePath, unicodeData=self.driver.page_source)
            # next tag page
elesNextPageA = self.driver.find_elements_by_css_selector("div.nav-links a.next.page-numbers")
while len(elesNextPageA) != 0:
time.sleep(random.randint(2,5)) #sleep random time
intPageNum = intPageNum+1
strTagUrl = elesNextPageA[0].get_attribute("href")
self.driver.get(strTagUrl)
                # save html
strTagHtmlFilePath = strTagHtmlFolderPath + u"\\%d_%s_tag.html"%(intPageNum, strNotObtainedTagName)
self.utility.overwriteSaveAs(strFilePath=strTagHtmlFilePath, unicodeData=self.driver.page_source)
                # next tag page, again
                elesNextPageA = self.driver.find_elements_by_css_selector("div.nav-links a.next.page-numbers")
            # mark the tag as fetched in the DB (isGot = 1)
self.db.updateTagStatusIsGot(strTagName=strNotObtainedTagName)
logging.info("got tag %s"%strNotObtainedTagName)
    # limit string length to fewer than 128 characters
    def limitStrLessThen128Char(self, strStr=None):
        if len(strStr) > 128:
            logging.info("limit str to less than 128 char")
return strStr[:127] + u"_"
else:
return strStr
    # download news pages (with strTagName == None, tags whose download completed are found automatically; note that unless the tag parser has been run first, news cannot be downloaded even for fully downloaded tags)
def downloadNewsPage(self, strTagName=None):
if strTagName is None:
            # no tag specified
lstStrObtainedTagName = self.db.fetchallCompletedObtainedTagName()
for strObtainedTagName in lstStrObtainedTagName:
self.downloadNewsPageWithGivenTagName(strTagName=strObtainedTagName)
else:
            # a tag name was specified
self.downloadNewsPageWithGivenTagName(strTagName=strTagName)
    # download news pages (for the given tag name)
def downloadNewsPageWithGivenTagName(self, strTagName=None):
logging.info("download news page with tag %s"%strTagName)
strNewsHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\TECHORANGE\\news"
if not os.path.exists(strNewsHtmlFolderPath):
os.mkdir(strNewsHtmlFolderPath) #mkdir source_html/TECHORANGE/news/
        # get the news urls recorded in the DB for the given strTagName tag
        lstStrNewsUrl = self.db.fetchallNewsUrlByTagName(strTagName=strTagName)
        intDownloadedNewsCount = 0 # number of downloaded news pages
        timeStart = time.time() # timing start point
        timeEnd = None # timing end point
for strNewsUrl in lstStrNewsUrl:
            # check whether it has already been downloaded
if not self.db.checkNewsIsGot(strNewsUrl=strNewsUrl):
                if intDownloadedNewsCount%10 == 0: # measure the time spent downloading 10 news pages
timeEnd = time.time()
timeCost = timeEnd - timeStart
logging.info("download 10 news cost %f sec"%timeCost)
timeStart = timeEnd
intDownloadedNewsCount = intDownloadedNewsCount+1
time.sleep(random.randint(2,5)) #sleep random time
self.driver.get(strNewsUrl)
                # save html
                strNewsName = re.match("^https://buzzorange.com/techorange/[\d]{4}/[\d]{2}/[\d]{2}/(.*)/$", strNewsUrl).group(1)
                strNewsName = self.limitStrLessThen128Char(strStr=strNewsName) # shorten the name to fewer than 128 characters
strNewsHtmlFilePath = strNewsHtmlFolderPath + u"\\%s_news.html"%strNewsName
self.utility.overwriteSaveAs(strFilePath=strNewsHtmlFilePath, unicodeData=self.driver.page_source)
                # mark the news as fetched in the DB (isGot = 1)
self.db.updateNewsStatusIsGot(strNewsUrl=strNewsUrl)
| muchu1983/104_cameo | cameo/spiderForTECHORANGE.py | Python | bsd-3-clause | 7,806 |
import itertools
import os
import re
from abc import ABC, abstractmethod
from glob import glob
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from ..io.image import _read_png_16
from .utils import verify_str_arg
from .vision import VisionDataset
__all__ = (
"KittiFlow",
"Sintel",
"FlyingThings3D",
"FlyingChairs",
"HD1K",
)
class FlowDataset(ABC, VisionDataset):
# Some datasets like Kitti have a built-in valid_flow_mask, indicating which flow values are valid
# For those we return (img1, img2, flow, valid_flow_mask), and for the rest we return (img1, img2, flow),
# and it's up to whatever consumes the dataset to decide what valid_flow_mask should be.
_has_builtin_flow_mask = False
def __init__(self, root, transforms=None):
super().__init__(root=root)
self.transforms = transforms
self._flow_list = []
self._image_list = []
def _read_img(self, file_name):
img = Image.open(file_name)
if img.mode != "RGB":
img = img.convert("RGB")
return img
@abstractmethod
def _read_flow(self, file_name):
# Return the flow or a tuple with the flow and the valid_flow_mask if _has_builtin_flow_mask is True
pass
def __getitem__(self, index):
img1 = self._read_img(self._image_list[index][0])
img2 = self._read_img(self._image_list[index][1])
if self._flow_list: # it will be empty for some dataset when split="test"
flow = self._read_flow(self._flow_list[index])
if self._has_builtin_flow_mask:
flow, valid_flow_mask = flow
else:
valid_flow_mask = None
else:
flow = valid_flow_mask = None
if self.transforms is not None:
img1, img2, flow, valid_flow_mask = self.transforms(img1, img2, flow, valid_flow_mask)
if self._has_builtin_flow_mask or valid_flow_mask is not None:
# The `or valid_flow_mask is not None` part is here because the mask can be generated within a transform
return img1, img2, flow, valid_flow_mask
else:
return img1, img2, flow
def __len__(self):
return len(self._image_list)
def __rmul__(self, v):
return torch.utils.data.ConcatDataset([self] * v)
class Sintel(FlowDataset):
"""`Sintel <http://sintel.is.tue.mpg.de/>`_ Dataset for optical flow.
The dataset is expected to have the following structure: ::
root
Sintel
testing
clean
scene_1
scene_2
...
final
scene_1
scene_2
...
training
clean
scene_1
scene_2
...
final
scene_1
scene_2
...
flow
scene_1
scene_2
...
Args:
root (string): Root directory of the Sintel Dataset.
split (string, optional): The dataset split, either "train" (default) or "test"
pass_name (string, optional): The pass to use, either "clean" (default), "final", or "both". See link above for
details on the different passes.
transforms (callable, optional): A function/transform that takes in
``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
``valid_flow_mask`` is expected for consistency with other datasets which
return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
"""
def __init__(self, root, split="train", pass_name="clean", transforms=None):
super().__init__(root=root, transforms=transforms)
verify_str_arg(split, "split", valid_values=("train", "test"))
verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
passes = ["clean", "final"] if pass_name == "both" else [pass_name]
root = Path(root) / "Sintel"
flow_root = root / "training" / "flow"
for pass_name in passes:
split_dir = "training" if split == "train" else split
image_root = root / split_dir / pass_name
for scene in os.listdir(image_root):
image_list = sorted(glob(str(image_root / scene / "*.png")))
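                # consecutive frames within a scene form the (img1, img2) pairs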
for i in range(len(image_list) - 1):
self._image_list += [[image_list[i], image_list[i + 1]]]
if split == "train":
self._flow_list += sorted(glob(str(flow_root / scene / "*.flo")))
def __getitem__(self, index):
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 3-tuple with ``(img1, img2, flow)``.
The flow is a numpy array of shape (2, H, W) and the images are PIL images.
``flow`` is None if ``split="test"``.
If a valid flow mask is generated within the ``transforms`` parameter,
a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
"""
return super().__getitem__(index)
def _read_flow(self, file_name):
return _read_flo(file_name)
class KittiFlow(FlowDataset):
"""`KITTI <http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow>`__ dataset for optical flow (2015).
The dataset is expected to have the following structure: ::
root
KittiFlow
testing
image_2
training
image_2
flow_occ
Args:
root (string): Root directory of the KittiFlow Dataset.
split (string, optional): The dataset split, either "train" (default) or "test"
transforms (callable, optional): A function/transform that takes in
``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
"""
_has_builtin_flow_mask = True
def __init__(self, root, split="train", transforms=None):
super().__init__(root=root, transforms=transforms)
verify_str_arg(split, "split", valid_values=("train", "test"))
root = Path(root) / "KittiFlow" / (split + "ing")
images1 = sorted(glob(str(root / "image_2" / "*_10.png")))
images2 = sorted(glob(str(root / "image_2" / "*_11.png")))
if not images1 or not images2:
raise FileNotFoundError(
"Could not find the Kitti flow images. Please make sure the directory structure is correct."
)
for img1, img2 in zip(images1, images2):
self._image_list += [[img1, img2]]
if split == "train":
self._flow_list = sorted(glob(str(root / "flow_occ" / "*_10.png")))
def __getitem__(self, index):
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)``
where ``valid_flow_mask`` is a numpy boolean mask of shape (H, W)
indicating which flow values are valid. The flow is a numpy array of
shape (2, H, W) and the images are PIL images. ``flow`` and ``valid_flow_mask`` are None if
``split="test"``.
"""
return super().__getitem__(index)
def _read_flow(self, file_name):
return _read_16bits_png_with_flow_and_valid_mask(file_name)
class FlyingChairs(FlowDataset):
"""`FlyingChairs <https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs>`_ Dataset for optical flow.
You will also need to download the FlyingChairs_train_val.txt file from the dataset page.
The dataset is expected to have the following structure: ::
root
FlyingChairs
data
00001_flow.flo
00001_img1.ppm
00001_img2.ppm
...
FlyingChairs_train_val.txt
Args:
root (string): Root directory of the FlyingChairs Dataset.
split (string, optional): The dataset split, either "train" (default) or "val"
transforms (callable, optional): A function/transform that takes in
``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
``valid_flow_mask`` is expected for consistency with other datasets which
return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
"""
def __init__(self, root, split="train", transforms=None):
super().__init__(root=root, transforms=transforms)
verify_str_arg(split, "split", valid_values=("train", "val"))
root = Path(root) / "FlyingChairs"
images = sorted(glob(str(root / "data" / "*.ppm")))
flows = sorted(glob(str(root / "data" / "*.flo")))
split_file_name = "FlyingChairs_train_val.txt"
if not os.path.exists(root / split_file_name):
raise FileNotFoundError(
"The FlyingChairs_train_val.txt file was not found - please download it from the dataset page (see docstring)."
)
split_list = np.loadtxt(str(root / split_file_name), dtype=np.int32)
for i in range(len(flows)):
split_id = split_list[i]
if (split == "train" and split_id == 1) or (split == "val" and split_id == 2):
self._flow_list += [flows[i]]
self._image_list += [[images[2 * i], images[2 * i + 1]]]
def __getitem__(self, index):
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 3-tuple with ``(img1, img2, flow)``.
The flow is a numpy array of shape (2, H, W) and the images are PIL images.
``flow`` is None if ``split="val"``.
If a valid flow mask is generated within the ``transforms`` parameter,
a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
"""
return super().__getitem__(index)
def _read_flow(self, file_name):
return _read_flo(file_name)
class FlyingThings3D(FlowDataset):
"""`FlyingThings3D <https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html>`_ dataset for optical flow.
The dataset is expected to have the following structure: ::
root
FlyingThings3D
frames_cleanpass
TEST
TRAIN
frames_finalpass
TEST
TRAIN
optical_flow
TEST
TRAIN
Args:
root (string): Root directory of the intel FlyingThings3D Dataset.
split (string, optional): The dataset split, either "train" (default) or "test"
pass_name (string, optional): The pass to use, either "clean" (default) or "final" or "both". See link above for
details on the different passes.
camera (string, optional): Which camera to return images from. Can be either "left" (default) or "right" or "both".
transforms (callable, optional): A function/transform that takes in
``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
``valid_flow_mask`` is expected for consistency with other datasets which
return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
"""
def __init__(self, root, split="train", pass_name="clean", camera="left", transforms=None):
super().__init__(root=root, transforms=transforms)
verify_str_arg(split, "split", valid_values=("train", "test"))
split = split.upper()
verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
passes = {
"clean": ["frames_cleanpass"],
"final": ["frames_finalpass"],
"both": ["frames_cleanpass", "frames_finalpass"],
}[pass_name]
verify_str_arg(camera, "camera", valid_values=("left", "right", "both"))
cameras = ["left", "right"] if camera == "both" else [camera]
root = Path(root) / "FlyingThings3D"
directions = ("into_future", "into_past")
for pass_name, camera, direction in itertools.product(passes, cameras, directions):
image_dirs = sorted(glob(str(root / pass_name / split / "*/*")))
image_dirs = sorted(Path(image_dir) / camera for image_dir in image_dirs)
flow_dirs = sorted(glob(str(root / "optical_flow" / split / "*/*")))
flow_dirs = sorted(Path(flow_dir) / direction / camera for flow_dir in flow_dirs)
if not image_dirs or not flow_dirs:
raise FileNotFoundError(
"Could not find the FlyingThings3D flow images. "
"Please make sure the directory structure is correct."
)
for image_dir, flow_dir in zip(image_dirs, flow_dirs):
images = sorted(glob(str(image_dir / "*.png")))
flows = sorted(glob(str(flow_dir / "*.pfm")))
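                # "into_future" pairs frame i with i + 1; "into_past" reverses the pair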
for i in range(len(flows) - 1):
if direction == "into_future":
self._image_list += [[images[i], images[i + 1]]]
self._flow_list += [flows[i]]
elif direction == "into_past":
self._image_list += [[images[i + 1], images[i]]]
self._flow_list += [flows[i + 1]]
def __getitem__(self, index):
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 3-tuple with ``(img1, img2, flow)``.
The flow is a numpy array of shape (2, H, W) and the images are PIL images.
``flow`` is None if ``split="test"``.
If a valid flow mask is generated within the ``transforms`` parameter,
a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
"""
return super().__getitem__(index)
def _read_flow(self, file_name):
return _read_pfm(file_name)
class HD1K(FlowDataset):
"""`HD1K <http://hci-benchmark.iwr.uni-heidelberg.de/>`__ dataset for optical flow.
The dataset is expected to have the following structure: ::
root
hd1k
hd1k_challenge
image_2
hd1k_flow_gt
flow_occ
hd1k_input
image_2
Args:
root (string): Root directory of the HD1K Dataset.
split (string, optional): The dataset split, either "train" (default) or "test"
transforms (callable, optional): A function/transform that takes in
``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
"""
_has_builtin_flow_mask = True
def __init__(self, root, split="train", transforms=None):
super().__init__(root=root, transforms=transforms)
verify_str_arg(split, "split", valid_values=("train", "test"))
root = Path(root) / "hd1k"
if split == "train":
# There are 36 "sequences" and we don't want seq i to overlap with seq i + 1, so we need this for loop
for seq_idx in range(36):
flows = sorted(glob(str(root / "hd1k_flow_gt" / "flow_occ" / f"{seq_idx:06d}_*.png")))
images = sorted(glob(str(root / "hd1k_input" / "image_2" / f"{seq_idx:06d}_*.png")))
for i in range(len(flows) - 1):
self._flow_list += [flows[i]]
self._image_list += [[images[i], images[i + 1]]]
else:
images1 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*10.png")))
images2 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*11.png")))
for image1, image2 in zip(images1, images2):
self._image_list += [[image1, image2]]
if not self._image_list:
raise FileNotFoundError(
"Could not find the HD1K images. Please make sure the directory structure is correct."
)
def _read_flow(self, file_name):
return _read_16bits_png_with_flow_and_valid_mask(file_name)
def __getitem__(self, index):
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` where ``valid_flow_mask``
is a numpy boolean mask of shape (H, W)
indicating which flow values are valid. The flow is a numpy array of
shape (2, H, W) and the images are PIL images. ``flow`` and ``valid_flow_mask`` are None if
``split="test"``.
"""
return super().__getitem__(index)
def _read_flo(file_name):
"""Read .flo file in Middlebury format"""
# Code adapted from:
# http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
# Everything needs to be in little Endian according to
# https://vision.middlebury.edu/flow/code/flow-code/README.txt
with open(file_name, "rb") as f:
magic = np.fromfile(f, "c", count=4).tobytes()
if magic != b"PIEH":
raise ValueError("Magic number incorrect. Invalid .flo file")
w = int(np.fromfile(f, "<i4", count=1))
h = int(np.fromfile(f, "<i4", count=1))
data = np.fromfile(f, "<f4", count=2 * w * h)
return data.reshape(h, w, 2).transpose(2, 0, 1)
def _read_16bits_png_with_flow_and_valid_mask(file_name):
flow_and_valid = _read_png_16(file_name).to(torch.float32)
flow, valid_flow_mask = flow_and_valid[:2, :, :], flow_and_valid[2, :, :]
    flow = (flow - 2 ** 15) / 64  # KITTI encodes flow as 16-bit ints: u = (value - 2**15) / 64, per the flow devkit readme
valid_flow_mask = valid_flow_mask.bool()
# For consistency with other datasets, we convert to numpy
return flow.numpy(), valid_flow_mask.numpy()
def _read_pfm(file_name):
"""Read flow in .pfm format"""
with open(file_name, "rb") as f:
header = f.readline().rstrip()
if header != b"PF":
raise ValueError("Invalid PFM file")
dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
if not dim_match:
raise Exception("Malformed PFM header.")
w, h = (int(dim) for dim in dim_match.groups())
scale = float(f.readline().rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(f, dtype=endian + "f")
data = data.reshape(h, w, 3).transpose(2, 0, 1)
data = np.flip(data, axis=1) # flip on h dimension
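        # PFM files hold three channels; only the first two (u, v) carry flow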
data = data[:2, :, :]
return data.astype(np.float32)
| pytorch/vision | torchvision/datasets/_optical_flow.py | Python | bsd-3-clause | 19,330 |
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
__version__ = "1.2.0.11"
| STIXProject/python-stix | stix/version.py | Python | bsd-3-clause | 130 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
""" Incorrect HTTP API arguments """
pass
class RenderError(Exception):
""" Error rendering page """
pass
class InternalError(Exception):
""" Unhandled internal error """
pass
class GlobalTimeoutError(Exception):
""" Timeout exceeded rendering page """
pass
class UnsupportedContentType(Exception):
""" Request Content-Type is not supported """
pass
class ExpiredArguments(Exception):
""" Arguments stored with ``save_args`` are expired """
pass
class ScriptError(BadOption):
""" Error happened while executing Lua script """
LUA_INIT_ERROR = 'LUA_INIT_ERROR' # error happened before coroutine starts
LUA_ERROR = 'LUA_ERROR' # lua error() is called from the coroutine
LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR' # result can't be converted to Python
SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR' # custom error raised by Splash
BAD_MAIN_ERROR = 'BAD_MAIN_ERROR' # main() definition is incorrect
MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR' # main() is not found
SYNTAX_ERROR = 'SYNTAX_ERROR' # XXX: unused; reported as INIT_ERROR now
JS_ERROR = 'JS_ERROR' # error in a wrapped JS function
UNKNOWN_ERROR = 'UNKNOWN_ERROR'
class JsError(Exception):
""" Error occured in JavaScript code """
pass
class OneShotCallbackError(Exception):
""" A one shot callback was called more than once. """
pass
| Youwotma/splash | splash/exceptions.py | Python | bsd-3-clause | 1,502 |
"Yang/Wu's OEP implementation, in PyQuante."
from math import sqrt
import settings
from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\
array,solve
from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK
from PyQuante.LA2 import geigh,mkdens,trace2,simx
from PyQuante.hartree_fock import get_fock
from PyQuante.CGBF import three_center
from PyQuante.optimize import fminBFGS
from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\
get_entropy,mkdens_fermi
import logging
logger = logging.getLogger("pyquante")
gradcall=0
class EXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a HF or a DFT calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nclosed, self.nopen = self.molecule.get_closedopen()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbs = self.solver.orbs
self.orbe = self.solver.orbe
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
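        # Fermi-Amaldi starting potential from the converged closed-shell
        # density; H0 = h + Vfa is the fixed part of the OEP Hamiltonian.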
D0 = mkdens(self.orbs,0,self.nclosed)
J0 = getJ(self.Ints,D0)
Vfa = (2.0*(self.nel-1.0)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(self.nbf,'d')
return
def iterate(self,**kwargs):
self.iter = 0
self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
self.Hoep = get_Hoep(b,self.H0,self.Gij)
self.orbe,self.orbs = geigh(self.Hoep,self.S)
if self.etemp:
self.D,self.entropy = mkdens_fermi(self.nel,self.orbe,self.orbs,
self.etemp)
else:
self.D = mkdens(self.orbs,0,self.nclosed)
self.entropy=0
self.F = get_fock(self.D,self.Ints,self.h)
self.energy = trace2(self.h+self.F,self.D)+self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmo = simx(self.F,self.orbs)
bp = zeros(self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbs)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nclosed):
for a in xrange(self.nclosed,self.norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(self.orbe[i]-self.orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
class UEXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a UHF calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nalpha, self.nbeta = self.molecule.get_alphabeta()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbsa = self.solver.orbsa
self.orbsb = self.solver.orbsb
self.orbea = self.solver.orbea
self.orbeb = self.solver.orbeb
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
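        # Fermi-Amaldi starting potential from the total (alpha + beta) density.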
D0 = mkdens(self.orbsa,0,self.nalpha)+mkdens(self.orbsb,0,self.nbeta)
J0 = getJ(self.Ints,D0)
Vfa = ((self.nel-1.)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(2*self.nbf,'d')
return
def iterate(self,**kwargs):
self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
self.iter = 0
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
ba = b[:self.nbf]
bb = b[self.nbf:]
self.Hoepa = get_Hoep(ba,self.H0,self.Gij)
self.Hoepb = get_Hoep(bb,self.H0,self.Gij)
self.orbea,self.orbsa = geigh(self.Hoepa,self.S)
self.orbeb,self.orbsb = geigh(self.Hoepb,self.S)
if self.etemp:
self.Da,entropya = mkdens_fermi(2*self.nalpha,self.orbea,self.orbsa,
self.etemp)
self.Db,entropyb = mkdens_fermi(2*self.nbeta,self.orbeb,self.orbsb,
self.etemp)
self.entropy = 0.5*(entropya+entropyb)
else:
self.Da = mkdens(self.orbsa,0,self.nalpha)
self.Db = mkdens(self.orbsb,0,self.nbeta)
self.entropy=0
J = getJ(self.Ints,self.Da+self.Db)
Ka = getK(self.Ints,self.Da)
Kb = getK(self.Ints,self.Db)
self.Fa = self.h + J - Ka
self.Fb = self.h + J - Kb
self.energy = 0.5*(trace2(self.h+self.Fa,self.Da) +
trace2(self.h+self.Fb,self.Db))\
+ self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmoa = simx(self.Fa,self.orbsa)
Fmob = simx(self.Fb,self.orbsb)
bp = zeros(2*self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsa)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nalpha):
for a in xrange(self.nalpha,self.norb):
bp[g] += Fmoa[i,a]*Gmo[i,a]/(self.orbea[i]-self.orbea[a])
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsb)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nbeta):
for a in xrange(self.nbeta,self.norb):
bp[self.nbf+g] += Fmob[i,a]*Gmo[i,a]/(self.orbeb[i]-self.orbeb[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
def exx(atoms,orbs,**kwargs):
return oep_hf(atoms,orbs,**kwargs)
def oep_hf(atoms,orbs,**kwargs):
"""oep_hf - Form the optimized effective potential for HF exchange.
See notes on options and other args in oep routine.
"""
return oep(atoms,orbs,get_exx_energy,get_exx_gradient,**kwargs)
def oep(atoms,orbs,energy_func,grad_func=None,**kwargs):
"""oep - Form the optimized effective potential for a given energy expression
oep(atoms,orbs,energy_func,grad_func=None,**kwargs)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
energy_func The function that returns the energy for the given method
    grad_func   The function that returns the gradient for the given method
Options
-------
verbose False Output terse information to stdout (default)
True Print out additional information
ETemp False Use ETemp value for finite temperature DFT (default)
float Use (float) for the electron temperature
bfs None The basis functions to use. List of CGBF's
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
verbose = kwargs.get('verbose')
ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)
opt_method = kwargs.get('opt_method',settings.OEPOptMethod)
bfs = getbasis(atoms,**kwargs)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = kwargs.get('pbfs')
if not pbfs: pbfs = bfs
npbf = len(pbfs)
S,h,Ints = getints(bfs,atoms,**kwargs)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = kwargs.get('bvec')
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
b = fminBFGS(energy_func,b,grad_func,
(nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij),
logger=logging)
energy,orbe,orbs = energy_func(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=1)
return energy,orbe,orbs
def get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
"""Computes the energy for the OEP/HF functional
Options:
return_flag 0 Just return the energy
1 Return energy, orbe, orbs
2 Return energy, orbe, orbs, F
"""
return_flag = kwargs.get('return_flag')
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
if ETemp:
efermi = get_efermi(nel,orbe,ETemp)
occs = get_fermi_occs(efermi,orbe,ETemp)
D = mkdens_occs(orbs,occs)
entropy = get_entropy(occs,ETemp)
else:
D = mkdens(orbs,0,nocc)
F = get_fock(D,Ints,h)
energy = trace2(h+F,D)+Enuke
if ETemp: energy += entropy
iref = nel/2
gap = 627.51*(orbe[iref]-orbe[iref-1])
logging.debug("EXX Energy, B, Gap: %10.5f %10.5f %10.5f"
% (energy,sqrt(dot(b,b)),gap))
#logging.debug("%s" % orbe)
if return_flag == 1:
return energy,orbe,orbs
elif return_flag == 2:
return energy,orbe,orbs,F
return energy
def get_exx_gradient(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
"""Computes the gradient for the OEP/HF functional.
return_flag 0 Just return gradient
1 Return energy,gradient
2 Return energy,gradient,orbe,orbs
"""
# Dump the gradient every 10 steps so we can restart...
global gradcall
gradcall += 1
#if gradcall % 5 == 0: logging.debug("B vector:\n%s" % b)
# Form the new potential and the new orbitals
energy,orbe,orbs,F = get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=2)
Fmo = matrixmultiply(transpose(orbs),matrixmultiply(F,orbs))
norb = nbf
bp = zeros(nbf,'d') # dE/db
for g in xrange(nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = matrixmultiply(transpose(orbs),matrixmultiply(Gij[g],orbs))
# Now sum the appropriate terms to get the b gradient
for i in xrange(nocc):
for a in xrange(nocc,norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(orbe[i]-orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return_flag = kwargs.get('return_flag')
if return_flag == 1:
return energy,bp
elif return_flag == 2:
return energy,bp,orbe,orbs
return bp
def get_Hoep(b,H0,Gij):
Hoep = H0
# Add the contributions from the gaussian potential functions
# H[ij] += b[g]*<ibf|g|jbf>
for g in xrange(len(b)):
Hoep = Hoep + b[g]*Gij[g]
return Hoep
# Here's a much faster way to do this. Haven't figured out how to
# do it for more generic functions like OEP-GVB
def oep_hf_an(atoms,orbs,**kwargs):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_hf(atoms,orbs,**kwargs)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = kwargs.get('maxiter',settings.OEPIters)
tol = kwargs.get('tol',settings.OEPTolerance)
bfs = getbasis(atoms,**kwargs)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = kwargs.get('pbfs')
if not pbfs: pbfs = bfs
npbf = len(pbfs)
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = kwargs.get('bvec')
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
b = zeros(nbf,'d')
eold = 0
for iter in xrange(maxiter):
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
D = mkdens(orbs,0,nocc)
Vhf = get2JmK(Ints,D)
energy = trace2(2*h+Vhf,D)+Enuke
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
dV_ao = Vhf-Vfa
dV = matrixmultiply(transpose(orbs),matrixmultiply(dV_ao,orbs))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
Gkt = zeros((nbf,nbf),'d')
for k in xrange(nbf):
# This didn't work; in fact, it made things worse:
Gk = matrixmultiply(transpose(orbs),matrixmultiply(Gij[k],orbs))
for i in xrange(nocc):
for a in xrange(nocc,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbe[i]-orbe[a])
for l in xrange(nbf):
Gl = matrixmultiply(transpose(orbs),matrixmultiply(Gij[l],orbs))
for i in xrange(nocc):
for a in xrange(nocc,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbe[i]-orbe[a])
# This should actually be a pseudoinverse...
b = solve(X,c)
logger.info("Final OEP energy = %f" % energy)
return energy,orbe,orbs
def oep_uhf_an(atoms,orbsa,orbsb,**kwargs):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_uhf(atoms,orbs,**kwargs)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = kwargs.get('maxiter',settings.OEPIters)
tol = kwargs.get('tol',settings.OEPTolerance)
ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)
bfs = getbasis(atoms,**kwargs)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = kwargs.get('pbfs')
if not pbfs: pbfs = bfs
npbf = len(pbfs)
S,h,Ints = getints(bfs,atoms,**kwargs)
nel = atoms.get_nel()
nclosed,nopen = atoms.get_closedopen()
nalpha,nbeta = nclosed+nopen,nclosed
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
ba = zeros(npbf,'d')
bb = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbsa,0,nalpha)+mkdens(orbsb,0,nbeta)
J0 = getJ(Ints,D0)
Vfa = ((nel-1.)/nel)*J0
H0 = h + Vfa
eold = 0
for iter in xrange(maxiter):
Hoepa = get_Hoep(ba,H0,Gij)
        Hoepb = get_Hoep(bb,H0,Gij)
orbea,orbsa = geigh(Hoepa,S)
orbeb,orbsb = geigh(Hoepb,S)
if ETemp:
efermia = get_efermi(2*nalpha,orbea,ETemp)
occsa = get_fermi_occs(efermia,orbea,ETemp)
Da = mkdens_occs(orbsa,occsa)
efermib = get_efermi(2*nbeta,orbeb,ETemp)
occsb = get_fermi_occs(efermib,orbeb,ETemp)
Db = mkdens_occs(orbsb,occsb)
entropy = 0.5*(get_entropy(occsa,ETemp)+get_entropy(occsb,ETemp))
else:
Da = mkdens(orbsa,0,nalpha)
Db = mkdens(orbsb,0,nbeta)
J = getJ(Ints,Da) + getJ(Ints,Db)
Ka = getK(Ints,Da)
Kb = getK(Ints,Db)
energy = (trace2(2*h+J-Ka,Da)+trace2(2*h+J-Kb,Db))/2\
+Enuke
if ETemp: energy += entropy
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
# Do alpha and beta separately
# Alphas
dV_ao = J-Ka-Vfa
dV = matrixmultiply(orbsa,matrixmultiply(dV_ao,transpose(orbsa)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in xrange(nbf):
Gk = matrixmultiply(orbsa,matrixmultiply(Gij[k],
transpose(orbsa)))
for i in xrange(nalpha):
for a in xrange(nalpha,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbea[i]-orbea[a])
for l in xrange(nbf):
Gl = matrixmultiply(orbsa,matrixmultiply(Gij[l],
transpose(orbsa)))
for i in xrange(nalpha):
for a in xrange(nalpha,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbea[i]-orbea[a])
# This should actually be a pseudoinverse...
ba = solve(X,c)
# Betas
dV_ao = J-Kb-Vfa
dV = matrixmultiply(orbsb,matrixmultiply(dV_ao,transpose(orbsb)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in xrange(nbf):
Gk = matrixmultiply(orbsb,matrixmultiply(Gij[k],
transpose(orbsb)))
for i in xrange(nbeta):
for a in xrange(nbeta,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbeb[i]-orbeb[a])
for l in xrange(nbf):
Gl = matrixmultiply(orbsb,matrixmultiply(Gij[l],
transpose(orbsb)))
for i in xrange(nbeta):
for a in xrange(nbeta,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbeb[i]-orbeb[a])
# This should actually be a pseudoinverse...
bb = solve(X,c)
logger.info("Final OEP energy = %f" % energy)
return energy,(orbea,orbeb),(orbsa,orbsb)
def test_old():
from PyQuante.Molecule import Molecule
from PyQuante.Ints import getbasis,getints
from PyQuante.hartree_fock import rhf
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
#mol = Molecule('HF',[('H',(0.,0.,0.)),('F',(0.,0.,0.898369))],
# units='Angstrom')
mol = Molecule('LiH',[(1,(0,0,1.5)),(3,(0,0,-1.5))],units = 'Bohr')
bfs = getbasis(mol)
S,h,Ints = getints(bfs,mol)
print "after integrals"
E_hf,orbe_hf,orbs_hf = rhf(mol,bfs=bfs,integrals=(S,h,Ints),DoAveraging=True)
print "RHF energy = ",E_hf
E_exx,orbe_exx,orbs_exx = exx(mol,orbs_hf,bfs=bfs,integrals=(S,h,Ints))
return
def test():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
solver = HFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = EXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=40000)
return
def utest():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
mol = Molecule("Li",[(3,(0,0,0))],multiplicity=2)
solver = UHFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = UEXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=10000)
return
if __name__ == '__main__':
test()
utest()
| berquist/PyQuante | PyQuante/OEP.py | Python | bsd-3-clause | 25427 |
def extractLittlebambooHomeBlog(item):
'''
Parser for 'littlebamboo.home.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('FW', 'Fortunate Wife', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
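	# A hedged example (item fields invented for illustration): an item
	# titled 'Fortunate Wife c12' with tags ['FW'] would be reported as a
	# 'Fortunate Wife' translated release; items with no matching tag
	# fall through to the `return False` below.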
	return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractLittlebambooHomeBlog.py | Python | bsd-3-clause | 622 |
from __future__ import print_function, division
from sympy.core import S, sympify, cacheit, pi, I, Rational
from sympy.core.add import Add
from sympy.core.function import Function, ArgumentIndexError, _coeff_isneg
from sympy.functions.combinatorial.factorials import factorial, RisingFactorial
from sympy.functions.elementary.exponential import exp, log, match_real_imag
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.integers import floor
from sympy.core.logic import fuzzy_or, fuzzy_and
def _rewrite_hyperbolics_as_exp(expr):
expr = sympify(expr)
return expr.xreplace({h: h.rewrite(exp)
for h in expr.atoms(HyperbolicFunction)})
###############################################################################
########################### HYPERBOLIC FUNCTIONS ##############################
###############################################################################
class HyperbolicFunction(Function):
"""
Base class for hyperbolic functions.
See Also
========
sinh, cosh, tanh, coth
"""
unbranched = True
def _peeloff_ipi(arg):
"""
Split ARG into two parts, a "rest" and a multiple of I*pi/2.
This assumes ARG to be an Add.
The multiple of I*pi returned in the second position is always a Rational.
Examples
========
>>> from sympy.functions.elementary.hyperbolic import _peeloff_ipi as peel
>>> from sympy import pi, I
>>> from sympy.abc import x, y
>>> peel(x + I*pi/2)
(x, I*pi/2)
>>> peel(x + I*2*pi/3 + I*pi*y)
(x + I*pi*y + I*pi/6, I*pi/2)
"""
for a in Add.make_args(arg):
if a == S.Pi*S.ImaginaryUnit:
K = S.One
break
elif a.is_Mul:
K, p = a.as_two_terms()
if p == S.Pi*S.ImaginaryUnit and K.is_Rational:
break
else:
return arg, S.Zero
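    # K*pi*I = m1 + m2, with m1 = (K % 1/2)*pi*I; m2 is then an integer
    # multiple of pi*I/2, which sinh/cosh/tanh can evaluate exactly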
m1 = (K % S.Half)*S.Pi*S.ImaginaryUnit
m2 = K*S.Pi*S.ImaginaryUnit - m1
return arg - m2, m2
class sinh(HyperbolicFunction):
r"""
The hyperbolic sine function, `\frac{e^x - e^{-x}}{2}`.
* sinh(x) -> Returns the hyperbolic sine of x
See Also
========
cosh, tanh, asinh
"""
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return cosh(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return asinh
@classmethod
def eval(cls, arg):
from sympy import sin
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.NegativeInfinity
elif arg.is_zero:
return S.Zero
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * sin(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_Add:
x, m = _peeloff_ipi(arg)
if m:
return sinh(m)*cosh(x) + cosh(m)*sinh(x)
if arg.is_zero:
return S.Zero
if arg.func == asinh:
return arg.args[0]
if arg.func == acosh:
x = arg.args[0]
return sqrt(x - 1) * sqrt(x + 1)
if arg.func == atanh:
x = arg.args[0]
return x/sqrt(1 - x**2)
if arg.func == acoth:
x = arg.args[0]
return 1/(sqrt(x - 1) * sqrt(x + 1))
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""
Returns the next term in the Taylor series expansion.
"""
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2:
p = previous_terms[-2]
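                # successive odd-order terms x**n/n! differ by a factor of
                # x**2/(n*(n - 1)), so the next term follows from the last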
return p * x**2 / (n*(n - 1))
else:
return x**(n) / factorial(n)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
"""
Returns this function as a complex coordinate.
"""
from sympy import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (sinh(re)*cos(im), cosh(re)*sin(im))
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_expand_trig(self, deep=True, **hints):
if deep:
arg = self.args[0].expand(deep, **hints)
else:
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
else:
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One and coeff.is_Integer and terms is not S.One:
x = terms
y = (coeff - 1)*x
if x is not None:
return (sinh(x)*cosh(y) + sinh(y)*cosh(x)).expand(trig=True)
return sinh(arg)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
return (exp(arg) - exp(-arg)) / 2
def _eval_rewrite_as_exp(self, arg, **kwargs):
return (exp(arg) - exp(-arg)) / 2
def _eval_rewrite_as_cosh(self, arg, **kwargs):
return -S.ImaginaryUnit*cosh(arg + S.Pi*S.ImaginaryUnit/2)
def _eval_rewrite_as_tanh(self, arg, **kwargs):
tanh_half = tanh(S.Half*arg)
return 2*tanh_half/(1 - tanh_half**2)
def _eval_rewrite_as_coth(self, arg, **kwargs):
coth_half = coth(S.Half*arg)
return 2*coth_half/(coth_half**2 - 1)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
arg = self.args[0]
if arg.is_real:
return True
        # sinh(re + I*im) = sinh(re)*cos(im) + I*cosh(re)*sin(im) is real
        # iff im is an integer multiple of pi; (im % pi).is_zero tests this
re, im = arg.as_real_imag()
return (im%pi).is_zero
def _eval_is_extended_real(self):
if self.args[0].is_extended_real:
return True
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return self.args[0].is_positive
def _eval_is_negative(self):
if self.args[0].is_extended_real:
return self.args[0].is_negative
def _eval_is_finite(self):
arg = self.args[0]
return arg.is_finite
def _eval_is_zero(self):
arg = self.args[0]
if arg.is_zero:
return True
class cosh(HyperbolicFunction):
r"""
The hyperbolic cosine function, `\frac{e^x + e^{-x}}{2}`.
* cosh(x) -> Returns the hyperbolic cosine of x
See Also
========
sinh, tanh, acosh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return sinh(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import cos
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Infinity
elif arg.is_zero:
return S.One
elif arg.is_negative:
return cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return cos(i_coeff)
else:
if _coeff_isneg(arg):
return cls(-arg)
if arg.is_Add:
x, m = _peeloff_ipi(arg)
if m:
return cosh(m)*cosh(x) + sinh(m)*sinh(x)
if arg.is_zero:
return S.One
if arg.func == asinh:
return sqrt(1 + arg.args[0]**2)
if arg.func == acosh:
return arg.args[0]
if arg.func == atanh:
return 1/sqrt(1 - arg.args[0]**2)
if arg.func == acoth:
x = arg.args[0]
return x/(sqrt(x - 1) * sqrt(x + 1))
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2:
p = previous_terms[-2]
return p * x**2 / (n*(n - 1))
else:
return x**(n)/factorial(n)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (cosh(re)*cos(im), sinh(re)*sin(im))
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_expand_trig(self, deep=True, **hints):
if deep:
arg = self.args[0].expand(deep, **hints)
else:
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
else:
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One and coeff.is_Integer and terms is not S.One:
x = terms
y = (coeff - 1)*x
if x is not None:
return (cosh(x)*cosh(y) + sinh(x)*sinh(y)).expand(trig=True)
return cosh(arg)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
return (exp(arg) + exp(-arg)) / 2
def _eval_rewrite_as_exp(self, arg, **kwargs):
return (exp(arg) + exp(-arg)) / 2
def _eval_rewrite_as_sinh(self, arg, **kwargs):
return -S.ImaginaryUnit*sinh(arg + S.Pi*S.ImaginaryUnit/2)
def _eval_rewrite_as_tanh(self, arg, **kwargs):
tanh_half = tanh(S.Half*arg)**2
return (1 + tanh_half)/(1 - tanh_half)
def _eval_rewrite_as_coth(self, arg, **kwargs):
coth_half = coth(S.Half*arg)**2
return (coth_half + 1)/(coth_half - 1)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return S.One
else:
return self.func(arg)
def _eval_is_real(self):
arg = self.args[0]
# `cosh(x)` is real for real OR purely imaginary `x`
if arg.is_real or arg.is_imaginary:
return True
        # cosh(a + I*b) = cos(b)*cosh(a) + I*sin(b)*sinh(a), so the value
        # is real iff sin(b)*sinh(a) = 0; since purely imaginary arguments
        # were handled above, this reduces to b being a multiple of pi
re, im = arg.as_real_imag()
return (im%pi).is_zero
def _eval_is_positive(self):
# cosh(x+I*y) = cos(y)*cosh(x) + I*sin(y)*sinh(x)
# cosh(z) is positive iff it is real and the real part is positive.
# So we need sin(y)*sinh(x) = 0 which gives x=0 or y=n*pi
# Case 1 (y=n*pi): cosh(z) = (-1)**n * cosh(x) -> positive for n even
# Case 2 (x=0): cosh(z) = cos(y) -> positive when cos(y) is positive
z = self.args[0]
x, y = z.as_real_imag()
ymod = y % (2*pi)
yzero = ymod.is_zero
# shortcut if ymod is zero
if yzero:
return True
xzero = x.is_zero
# shortcut x is not zero
if xzero is False:
return yzero
return fuzzy_or([
# Case 1:
yzero,
# Case 2:
fuzzy_and([
xzero,
fuzzy_or([ymod < pi/2, ymod > 3*pi/2])
])
])
def _eval_is_nonnegative(self):
z = self.args[0]
x, y = z.as_real_imag()
ymod = y % (2*pi)
yzero = ymod.is_zero
# shortcut if ymod is zero
if yzero:
return True
xzero = x.is_zero
# shortcut x is not zero
if xzero is False:
return yzero
return fuzzy_or([
# Case 1:
yzero,
# Case 2:
fuzzy_and([
xzero,
fuzzy_or([ymod <= pi/2, ymod >= 3*pi/2])
])
])
def _eval_is_finite(self):
arg = self.args[0]
return arg.is_finite
class tanh(HyperbolicFunction):
r"""
The hyperbolic tangent function, `\frac{\sinh(x)}{\cosh(x)}`.
* tanh(x) -> Returns the hyperbolic tangent of x
See Also
========
sinh, cosh, atanh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return S.One - tanh(self.args[0])**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return atanh
@classmethod
def eval(cls, arg):
from sympy import tan
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.One
elif arg is S.NegativeInfinity:
return S.NegativeOne
elif arg.is_zero:
return S.Zero
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
if _coeff_isneg(i_coeff):
return -S.ImaginaryUnit * tan(-i_coeff)
return S.ImaginaryUnit * tan(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_Add:
x, m = _peeloff_ipi(arg)
if m:
tanhm = tanh(m)
if tanhm is S.ComplexInfinity:
return coth(x)
else: # tanhm == 0
return tanh(x)
if arg.is_zero:
return S.Zero
if arg.func == asinh:
x = arg.args[0]
return x/sqrt(1 + x**2)
if arg.func == acosh:
x = arg.args[0]
return sqrt(x - 1) * sqrt(x + 1) / x
if arg.func == atanh:
return arg.args[0]
if arg.func == acoth:
return 1/arg.args[0]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from sympy import bernoulli
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
a = 2**(n + 1)
B = bernoulli(n + 1)
F = factorial(n + 1)
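            # from tanh(x) = sum_{k>=1} 2**(2k)*(2**(2k) - 1)*B(2k)*x**(2k-1)/(2k)!
            # with n = 2k - 1, so a = 2**(n+1), B = B(n+1) and F = (n+1)!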
return a*(a - 1) * B/F * x**n
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = sinh(re)**2 + cos(im)**2
return (sinh(re)*cosh(re)/denom, sin(im)*cos(im)/denom)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp - neg_exp)/(pos_exp + neg_exp)
def _eval_rewrite_as_exp(self, arg, **kwargs):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp - neg_exp)/(pos_exp + neg_exp)
def _eval_rewrite_as_sinh(self, arg, **kwargs):
return S.ImaginaryUnit*sinh(arg)/sinh(S.Pi*S.ImaginaryUnit/2 - arg)
def _eval_rewrite_as_cosh(self, arg, **kwargs):
return S.ImaginaryUnit*cosh(S.Pi*S.ImaginaryUnit/2 - arg)/cosh(arg)
def _eval_rewrite_as_coth(self, arg, **kwargs):
return 1/coth(arg)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
from sympy import cos, sinh
arg = self.args[0]
if arg.is_real:
return True
re, im = arg.as_real_imag()
# if denom = 0, tanh(arg) = zoo
if re == 0 and im % pi == pi/2:
return None
        # tanh(re + I*im) is real iff its imaginary part, proportional to
        # sin(2*im), vanishes, i.e. iff im is a multiple of pi/2
return (im % (pi/2)).is_zero
def _eval_is_extended_real(self):
if self.args[0].is_extended_real:
return True
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return self.args[0].is_positive
def _eval_is_negative(self):
if self.args[0].is_extended_real:
return self.args[0].is_negative
def _eval_is_finite(self):
from sympy import sinh, cos
arg = self.args[0]
re, im = arg.as_real_imag()
denom = cos(im)**2 + sinh(re)**2
if denom == 0:
return False
elif denom.is_number:
return True
if arg.is_extended_real:
return True
def _eval_is_zero(self):
arg = self.args[0]
if arg.is_zero:
return True
class coth(HyperbolicFunction):
r"""
The hyperbolic cotangent function, `\frac{\cosh(x)}{\sinh(x)}`.
* coth(x) -> Returns the hyperbolic cotangent of x
"""
def fdiff(self, argindex=1):
if argindex == 1:
return -1/sinh(self.args[0])**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return acoth
@classmethod
def eval(cls, arg):
from sympy import cot
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.One
elif arg is S.NegativeInfinity:
return S.NegativeOne
elif arg.is_zero:
return S.ComplexInfinity
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
if _coeff_isneg(i_coeff):
return S.ImaginaryUnit * cot(-i_coeff)
return -S.ImaginaryUnit * cot(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_Add:
x, m = _peeloff_ipi(arg)
if m:
cothm = coth(m)
if cothm is S.ComplexInfinity:
return coth(x)
else: # cothm == 0
return tanh(x)
if arg.is_zero:
return S.ComplexInfinity
if arg.func == asinh:
x = arg.args[0]
return sqrt(1 + x**2)/x
if arg.func == acosh:
x = arg.args[0]
return x/(sqrt(x - 1) * sqrt(x + 1))
if arg.func == atanh:
return 1/arg.args[0]
if arg.func == acoth:
return arg.args[0]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from sympy import bernoulli
if n == 0:
return 1 / sympify(x)
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
B = bernoulli(n + 1)
F = factorial(n + 1)
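            # from the Laurent series coth(x) = 1/x + sum_{k>=1}
            # 2**(2k)*B(2k)*x**(2k-1)/(2k)!, with n = 2k - 1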
return 2**(n + 1) * B/F * x**n
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = sinh(re)**2 + sin(im)**2
return (sinh(re)*cosh(re)/denom, -sin(im)*cos(im)/denom)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp + neg_exp)/(pos_exp - neg_exp)
def _eval_rewrite_as_exp(self, arg, **kwargs):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp + neg_exp)/(pos_exp - neg_exp)
def _eval_rewrite_as_sinh(self, arg, **kwargs):
return -S.ImaginaryUnit*sinh(S.Pi*S.ImaginaryUnit/2 - arg)/sinh(arg)
def _eval_rewrite_as_cosh(self, arg, **kwargs):
return -S.ImaginaryUnit*cosh(arg)/cosh(S.Pi*S.ImaginaryUnit/2 - arg)
def _eval_rewrite_as_tanh(self, arg, **kwargs):
return 1/tanh(arg)
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return self.args[0].is_positive
def _eval_is_negative(self):
if self.args[0].is_extended_real:
return self.args[0].is_negative
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return 1/arg
else:
return self.func(arg)
class ReciprocalHyperbolicFunction(HyperbolicFunction):
"""Base class for reciprocal functions of hyperbolic functions. """
#To be defined in class
_reciprocal_of = None
_is_even = None
_is_odd = None
@classmethod
def eval(cls, arg):
if arg.could_extract_minus_sign():
if cls._is_even:
return cls(-arg)
if cls._is_odd:
return -cls(-arg)
t = cls._reciprocal_of.eval(arg)
if hasattr(arg, 'inverse') and arg.inverse() == cls:
return arg.args[0]
return 1/t if t is not None else t
def _call_reciprocal(self, method_name, *args, **kwargs):
# Calls method_name on _reciprocal_of
o = self._reciprocal_of(self.args[0])
return getattr(o, method_name)(*args, **kwargs)
def _calculate_reciprocal(self, method_name, *args, **kwargs):
# If calling method_name on _reciprocal_of returns a value != None
# then return the reciprocal of that value
t = self._call_reciprocal(method_name, *args, **kwargs)
return 1/t if t is not None else t
def _rewrite_reciprocal(self, method_name, arg):
# Special handling for rewrite functions. If reciprocal rewrite returns
# unmodified expression, then return None
t = self._call_reciprocal(method_name, arg)
if t is not None and t != self._reciprocal_of(arg):
return 1/t
def _eval_rewrite_as_exp(self, arg, **kwargs):
return self._rewrite_reciprocal("_eval_rewrite_as_exp", arg)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
return self._rewrite_reciprocal("_eval_rewrite_as_tractable", arg)
def _eval_rewrite_as_tanh(self, arg, **kwargs):
return self._rewrite_reciprocal("_eval_rewrite_as_tanh", arg)
def _eval_rewrite_as_coth(self, arg, **kwargs):
return self._rewrite_reciprocal("_eval_rewrite_as_coth", arg)
def as_real_imag(self, deep = True, **hints):
return (1 / self._reciprocal_of(self.args[0])).as_real_imag(deep, **hints)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=True, **hints)
return re_part + S.ImaginaryUnit*im_part
def _eval_as_leading_term(self, x):
return (1/self._reciprocal_of(self.args[0]))._eval_as_leading_term(x)
def _eval_is_extended_real(self):
return self._reciprocal_of(self.args[0]).is_extended_real
def _eval_is_finite(self):
return (1/self._reciprocal_of(self.args[0])).is_finite
class csch(ReciprocalHyperbolicFunction):
r"""
The hyperbolic cosecant function, `\frac{2}{e^x - e^{-x}}`
* csch(x) -> Returns the hyperbolic cosecant of x
See Also
========
sinh, cosh, tanh, sech, asinh, acosh
"""
_reciprocal_of = sinh
_is_odd = True
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function
"""
if argindex == 1:
return -coth(self.args[0]) * csch(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""
Returns the next term in the Taylor series expansion
"""
from sympy import bernoulli
if n == 0:
return 1/sympify(x)
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
B = bernoulli(n + 1)
F = factorial(n + 1)
return 2 * (1 - 2**n) * B/F * x**n
def _eval_rewrite_as_cosh(self, arg, **kwargs):
return S.ImaginaryUnit / cosh(arg + S.ImaginaryUnit * S.Pi / 2)
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return self.args[0].is_positive
def _eval_is_negative(self):
if self.args[0].is_extended_real:
return self.args[0].is_negative
def _sage_(self):
import sage.all as sage
return sage.csch(self.args[0]._sage_())
class sech(ReciprocalHyperbolicFunction):
r"""
The hyperbolic secant function, `\frac{2}{e^x + e^{-x}}`
* sech(x) -> Returns the hyperbolic secant of x
See Also
========
sinh, cosh, tanh, coth, csch, asinh, acosh
"""
_reciprocal_of = cosh
_is_even = True
def fdiff(self, argindex=1):
if argindex == 1:
return - tanh(self.args[0])*sech(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from sympy.functions.combinatorial.numbers import euler
if n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
return euler(n) / factorial(n) * x**(n)
def _eval_rewrite_as_sinh(self, arg, **kwargs):
return S.ImaginaryUnit / sinh(arg + S.ImaginaryUnit * S.Pi /2)
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return True
def _sage_(self):
import sage.all as sage
return sage.sech(self.args[0]._sage_())
###############################################################################
############################# HYPERBOLIC INVERSES #############################
###############################################################################
class InverseHyperbolicFunction(Function):
"""Base class for inverse hyperbolic functions."""
pass
class asinh(InverseHyperbolicFunction):
"""
The inverse hyperbolic sine function.
* asinh(x) -> Returns the inverse hyperbolic sine of x
See Also
========
acosh, atanh, sinh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(self.args[0]**2 + 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import asin
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.NegativeInfinity
elif arg.is_zero:
return S.Zero
elif arg is S.One:
return log(sqrt(2) + 1)
elif arg is S.NegativeOne:
return log(sqrt(2) - 1)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.ComplexInfinity
if arg.is_zero:
return S.Zero
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * asin(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if isinstance(arg, sinh) and arg.args[0].is_number:
z = arg.args[0]
if z.is_real:
return z
r, i = match_real_imag(z)
if r is not None and i is not None:
f = floor((i + pi/2)/pi)
m = z - I*pi*f
even = f.is_even
if even is True:
return m
elif even is False:
return -m
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return -p * (n - 2)**2/(n*(n - 1)) * x**2
else:
k = (n - 1) // 2
R = RisingFactorial(S.Half, k)
F = factorial(k)
return (-1)**k * R / F * x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x, **kwargs):
return log(x + sqrt(x**2 + 1))
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return sinh
def _eval_is_zero(self):
arg = self.args[0]
if arg.is_zero:
return True
class acosh(InverseHyperbolicFunction):
"""
The inverse hyperbolic cosine function.
* acosh(x) -> Returns the inverse hyperbolic cosine of x
See Also
========
asinh, atanh, cosh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(self.args[0]**2 - 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Infinity
elif arg.is_zero:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.One:
return S.Zero
elif arg is S.NegativeOne:
return S.Pi*S.ImaginaryUnit
if arg.is_number:
cst_table = {
S.ImaginaryUnit: log(S.ImaginaryUnit*(1 + sqrt(2))),
-S.ImaginaryUnit: log(-S.ImaginaryUnit*(1 + sqrt(2))),
S.Half: S.Pi/3,
Rational(-1, 2): S.Pi*Rational(2, 3),
sqrt(2)/2: S.Pi/4,
-sqrt(2)/2: S.Pi*Rational(3, 4),
1/sqrt(2): S.Pi/4,
-1/sqrt(2): S.Pi*Rational(3, 4),
sqrt(3)/2: S.Pi/6,
-sqrt(3)/2: S.Pi*Rational(5, 6),
(sqrt(3) - 1)/sqrt(2**3): S.Pi*Rational(5, 12),
-(sqrt(3) - 1)/sqrt(2**3): S.Pi*Rational(7, 12),
sqrt(2 + sqrt(2))/2: S.Pi/8,
-sqrt(2 + sqrt(2))/2: S.Pi*Rational(7, 8),
sqrt(2 - sqrt(2))/2: S.Pi*Rational(3, 8),
-sqrt(2 - sqrt(2))/2: S.Pi*Rational(5, 8),
(1 + sqrt(3))/(2*sqrt(2)): S.Pi/12,
-(1 + sqrt(3))/(2*sqrt(2)): S.Pi*Rational(11, 12),
(sqrt(5) + 1)/4: S.Pi/5,
-(sqrt(5) + 1)/4: S.Pi*Rational(4, 5)
}
if arg in cst_table:
if arg.is_extended_real:
return cst_table[arg]*S.ImaginaryUnit
return cst_table[arg]
if arg is S.ComplexInfinity:
return S.ComplexInfinity
if arg == S.ImaginaryUnit*S.Infinity:
return S.Infinity + S.ImaginaryUnit*S.Pi/2
if arg == -S.ImaginaryUnit*S.Infinity:
return S.Infinity - S.ImaginaryUnit*S.Pi/2
if arg.is_zero:
return S.Pi*S.ImaginaryUnit*S.Half
if isinstance(arg, cosh) and arg.args[0].is_number:
z = arg.args[0]
if z.is_real:
from sympy.functions.elementary.complexes import Abs
return Abs(z)
r, i = match_real_imag(z)
if r is not None and i is not None:
f = floor(i/pi)
m = z - I*pi*f
even = f.is_even
if even is True:
if r.is_nonnegative:
return m
elif r.is_negative:
return -m
elif even is False:
m -= I*pi
if r.is_nonpositive:
return -m
elif r.is_positive:
return m
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return S.Pi*S.ImaginaryUnit / 2
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return p * (n - 2)**2/(n*(n - 1)) * x**2
else:
k = (n - 1) // 2
R = RisingFactorial(S.Half, k)
F = factorial(k)
return -R / F * S.ImaginaryUnit * x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return S.ImaginaryUnit*S.Pi/2
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x, **kwargs):
return log(x + sqrt(x + 1) * sqrt(x - 1))
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return cosh
class atanh(InverseHyperbolicFunction):
"""
The inverse hyperbolic tangent function.
* atanh(x) -> Returns the inverse hyperbolic tangent of x
See Also
========
asinh, acosh, tanh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import atan
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg.is_zero:
return S.Zero
elif arg is S.One:
return S.Infinity
elif arg is S.NegativeOne:
return S.NegativeInfinity
elif arg is S.Infinity:
return -S.ImaginaryUnit * atan(arg)
elif arg is S.NegativeInfinity:
return S.ImaginaryUnit * atan(-arg)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
from sympy.calculus.util import AccumBounds
return S.ImaginaryUnit*AccumBounds(-S.Pi/2, S.Pi/2)
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * atan(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_zero:
return S.Zero
if isinstance(arg, tanh) and arg.args[0].is_number:
z = arg.args[0]
if z.is_real:
return z
r, i = match_real_imag(z)
if r is not None and i is not None:
f = floor(2*i/pi)
even = f.is_even
m = z - I*f*pi/2
if even is True:
return m
elif even is False:
return m - I*pi/2
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x, **kwargs):
return (log(1 + x) - log(1 - x)) / 2
def _eval_is_zero(self):
arg = self.args[0]
if arg.is_zero:
return True
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return tanh
class acoth(InverseHyperbolicFunction):
"""
The inverse hyperbolic cotangent function.
* acoth(x) -> Returns the inverse hyperbolic cotangent of x
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import acot
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Zero
elif arg is S.NegativeInfinity:
return S.Zero
elif arg.is_zero:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.One:
return S.Infinity
elif arg is S.NegativeOne:
return S.NegativeInfinity
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.Zero
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return -S.ImaginaryUnit * acot(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_zero:
return S.Pi*S.ImaginaryUnit*S.Half
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return S.Pi*S.ImaginaryUnit / 2
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return S.ImaginaryUnit*S.Pi/2
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x, **kwargs):
return (log(1 + 1/x) - log(1 - 1/x)) / 2
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return coth
class asech(InverseHyperbolicFunction):
"""
The inverse hyperbolic secant function.
* asech(x) -> Returns the inverse hyperbolic secant of x
Examples
========
>>> from sympy import asech, sqrt, S
>>> from sympy.abc import x
>>> asech(x).diff(x)
-1/(x*sqrt(1 - x**2))
>>> asech(1).diff(x)
0
>>> asech(1)
0
>>> asech(S(2))
I*pi/3
>>> asech(-sqrt(2))
3*I*pi/4
>>> asech((sqrt(6) - sqrt(2)))
I*pi/12
See Also
========
asinh, atanh, cosh, acoth
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
.. [2] http://dlmf.nist.gov/4.37
.. [3] http://functions.wolfram.com/ElementaryFunctions/ArcSech/
"""
def fdiff(self, argindex=1):
if argindex == 1:
z = self.args[0]
return -1/(z*sqrt(1 - z**2))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.NegativeInfinity:
return S.Pi*S.ImaginaryUnit / 2
elif arg.is_zero:
return S.Infinity
elif arg is S.One:
return S.Zero
elif arg is S.NegativeOne:
return S.Pi*S.ImaginaryUnit
if arg.is_number:
cst_table = {
S.ImaginaryUnit: - (S.Pi*S.ImaginaryUnit / 2) + log(1 + sqrt(2)),
-S.ImaginaryUnit: (S.Pi*S.ImaginaryUnit / 2) + log(1 + sqrt(2)),
(sqrt(6) - sqrt(2)): S.Pi / 12,
(sqrt(2) - sqrt(6)): 11*S.Pi / 12,
sqrt(2 - 2/sqrt(5)): S.Pi / 10,
-sqrt(2 - 2/sqrt(5)): 9*S.Pi / 10,
2 / sqrt(2 + sqrt(2)): S.Pi / 8,
-2 / sqrt(2 + sqrt(2)): 7*S.Pi / 8,
2 / sqrt(3): S.Pi / 6,
-2 / sqrt(3): 5*S.Pi / 6,
(sqrt(5) - 1): S.Pi / 5,
(1 - sqrt(5)): 4*S.Pi / 5,
sqrt(2): S.Pi / 4,
-sqrt(2): 3*S.Pi / 4,
sqrt(2 + 2/sqrt(5)): 3*S.Pi / 10,
-sqrt(2 + 2/sqrt(5)): 7*S.Pi / 10,
S(2): S.Pi / 3,
-S(2): 2*S.Pi / 3,
sqrt(2*(2 + sqrt(2))): 3*S.Pi / 8,
-sqrt(2*(2 + sqrt(2))): 5*S.Pi / 8,
(1 + sqrt(5)): 2*S.Pi / 5,
(-1 - sqrt(5)): 3*S.Pi / 5,
(sqrt(6) + sqrt(2)): 5*S.Pi / 12,
(-sqrt(6) - sqrt(2)): 7*S.Pi / 12,
}
if arg in cst_table:
if arg.is_extended_real:
return cst_table[arg]*S.ImaginaryUnit
return cst_table[arg]
if arg is S.ComplexInfinity:
from sympy.calculus.util import AccumBounds
return S.ImaginaryUnit*AccumBounds(-S.Pi/2, S.Pi/2)
if arg.is_zero:
return S.Infinity
@staticmethod
@cacheit
def expansion_term(n, x, *previous_terms):
if n == 0:
return log(2 / x)
elif n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2 and n > 2:
p = previous_terms[-2]
return p * (n - 1)**2 // (n // 2)**2 * x**2 / 4
else:
k = n // 2
                R = RisingFactorial(S.Half, k) * n
F = factorial(k) * n // 2 * n // 2
return -1 * R / F * x**n / 4
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return sech
def _eval_rewrite_as_log(self, arg, **kwargs):
return log(1/arg + sqrt(1/arg - 1) * sqrt(1/arg + 1))
class acsch(InverseHyperbolicFunction):
"""
The inverse hyperbolic cosecant function.
* acsch(x) -> Returns the inverse hyperbolic cosecant of x
Examples
========
>>> from sympy import acsch, sqrt, S
>>> from sympy.abc import x
>>> acsch(x).diff(x)
-1/(x**2*sqrt(1 + x**(-2)))
>>> acsch(1).diff(x)
0
>>> acsch(1)
log(1 + sqrt(2))
>>> acsch(S.ImaginaryUnit)
-I*pi/2
>>> acsch(-2*S.ImaginaryUnit)
I*pi/6
>>> acsch(S.ImaginaryUnit*(sqrt(6) - sqrt(2)))
-5*I*pi/12
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
.. [2] http://dlmf.nist.gov/4.37
.. [3] http://functions.wolfram.com/ElementaryFunctions/ArcCsch/
"""
def fdiff(self, argindex=1):
if argindex == 1:
z = self.args[0]
return -1/(z**2*sqrt(1 + 1/z**2))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Zero
elif arg is S.NegativeInfinity:
return S.Zero
elif arg.is_zero:
return S.ComplexInfinity
elif arg is S.One:
return log(1 + sqrt(2))
elif arg is S.NegativeOne:
return - log(1 + sqrt(2))
if arg.is_number:
cst_table = {
S.ImaginaryUnit: -S.Pi / 2,
S.ImaginaryUnit*(sqrt(2) + sqrt(6)): -S.Pi / 12,
S.ImaginaryUnit*(1 + sqrt(5)): -S.Pi / 10,
S.ImaginaryUnit*2 / sqrt(2 - sqrt(2)): -S.Pi / 8,
S.ImaginaryUnit*2: -S.Pi / 6,
S.ImaginaryUnit*sqrt(2 + 2/sqrt(5)): -S.Pi / 5,
S.ImaginaryUnit*sqrt(2): -S.Pi / 4,
S.ImaginaryUnit*(sqrt(5)-1): -3*S.Pi / 10,
S.ImaginaryUnit*2 / sqrt(3): -S.Pi / 3,
S.ImaginaryUnit*2 / sqrt(2 + sqrt(2)): -3*S.Pi / 8,
S.ImaginaryUnit*sqrt(2 - 2/sqrt(5)): -2*S.Pi / 5,
S.ImaginaryUnit*(sqrt(6) - sqrt(2)): -5*S.Pi / 12,
S(2): -S.ImaginaryUnit*log((1+sqrt(5))/2),
}
if arg in cst_table:
return cst_table[arg]*S.ImaginaryUnit
if arg is S.ComplexInfinity:
return S.Zero
if arg.is_zero:
return S.ComplexInfinity
if _coeff_isneg(arg):
return -cls(-arg)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return csch
def _eval_rewrite_as_log(self, arg, **kwargs):
return log(1/arg + sqrt(1/arg**2 + 1))
| kaushik94/sympy | sympy/functions/elementary/hyperbolic.py | Python | bsd-3-clause | 48081 |
#!/usr/bin/env python
"""
Single trace Analysis
"""
__author__ = "Yanlong Yin ([email protected])"
__version__ = "$Revision: 1.4$"
__date__ = "$Date: 02/08/2014 $"
__copyright__ = "Copyright (c) 2010-2014 SCS Lab, IIT"
__license__ = "Python"
import sys, os, string, getopt, gc, multiprocessing
from sig import *
from access import *
from accList import *
from prop import *
from util import *
def detectSignature(filename):
# the list contains all the accesses
rlist = AccList()
wlist = AccList()
    accList = AccList()   # most figure drawing that used accList has been
                          # replaced with rlist and wlist; accList is kept
                          # for the combined I/O-rate output
# open the trace file
f = open(filename, 'r')
# skip the first several lines
# Maybe the skipped lines are table heads
for i in range(int(sig._format_prop['skip_lines'])):
line = f.readline()
# scan the file and put the access item into list
i = 0
j = 0
op_index = int(sig._format_prop['op'])
debugPrint ('op_index: ', op_index)
op = ''
# TODO: add while 1 loop here
for i in range(sig._range):
line = f.readline()
if not line:
break
words = string.split(line)
# there might be some blank lines
if len(words) < 6:
j+=1
continue
## only "READ" and "WRITE" will be saved
#if words[-1].count('READ') == 0 and words[-1].count('WRITE') == 0:
# to test chomob, only use write
# if words[-1].count('WRITE') == 0:
# j+=1
# continue
## save to list
op = words[op_index].upper();
acc = Access(words)
if acc.size >= 1:
accList.append(acc)
if op.count('READ')>0 or op == 'R':
debugPrint("one READ")
rlist.append(acc)
if op.count('WRITE')>0 or op == 'W':
debugPrint("one WRITE")
wlist.append(acc)
## close the opened file
f.close()
rlist.trace = filename
wlist.trace = filename
accList.trace = filename
# print the time summary
print 'Total read time: ', sig._total_read_time
print 'Total write time: ', sig._total_write_time
    print 'Number of operations - ', 'Read: ', len(rlist), ' Write: ', len(wlist)
## deal with the list
rlist.detect_signature(0, min(sig._range-j-1, len(rlist)-1) )
wlist.detect_signature(0, min(sig._range-j-1, len(wlist)-1) )
## Done with the whole process of detecting
## Print the whole signature
if len(rlist.signatures) > 0 or len(wlist.signatures) > 0:
print '----------------------------------------'
print 'The following signatures are detected:'
if len(rlist.signatures) > 0:
rlist.print_signature()
rlist.gen_protobuf(sig._out_path)
rlist.makeup_output(sig._out_path)
if len(wlist.signatures) > 0:
wlist.print_signature()
wlist.gen_protobuf(sig._out_path)
wlist.makeup_output(sig._out_path)
#if len(accList) > 0:
accList.gen_iorates(sig._out_path)
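# A hedged illustration of the trace format this parser assumes: each
# non-blank line has at least six whitespace-separated fields, one of which
# (selected by sig._format_prop['op']) names the operation, e.g.
#   12 0.000123 0.000456 READ 1048576 4096
# (field order and values here are invented for illustration).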
def generateCSVs(single_trace_filename):
"""Generate the Read/Write Bandwidth figures"""
trace_path, trace_filename = os.path.split(single_trace_filename)
# the list contains all the accesses
rlist = AccList()
wlist = AccList()
rlistEmpty = 1
wlistEmpty = 1
total_read_count = 0
total_write_count = 0
total_read_time = 0.0
total_write_time = 0.0
# Create and empty each CSV files, write the CSV title line
output = os.path.join(sig._out_path, trace_filename + ".read.rate.csv")
f = open(output, 'w')
f.write("Time,Rate\n")
f.close()
output = os.path.join(sig._out_path, trace_filename + ".write.rate.csv")
f = open(output, 'w')
f.write("Time,Rate\n")
f.close()
output = os.path.join(sig._out_path, trace_filename + ".read.interval.csv")
f = open(output, 'w')
f.write("Begin,End\n")
f.close()
output = os.path.join(sig._out_path, trace_filename + ".write.interval.csv")
f = open(output, 'w')
f.write("Begin,End\n")
f.close()
output = os.path.join(sig._out_path, trace_filename + ".read.hole.sizes.csv")
f = open(output, 'w')
f.write("Time,Size\n")
f.close()
# open the trace file
f = open(single_trace_filename, 'r')
# skip the first several lines
# Maybe the skipped lines are table heads
for i in range(int(sig._format_prop['skip_lines'])):
line = f.readline()
# scan the file and put the access item into list
i = 0
j = 0
eof = 0 # reaching the EOF?
op_index = int(sig._format_prop['op'])
debugPrint ('op_index: ', op_index)
op = ''
while 1:
        # handle up to sig._range operations per batch
for i in range(sig._range):
line = f.readline()
if not line:
eof = 1
break
words = string.split(line)
# there might be some blank lines
if len(words) < 6:
j+=1
continue
## only "READ" and "WRITE" will be saved
#if words[-1].count('READ') == 0 and words[-1].count('WRITE') == 0:
# to test chomob, only use write
# if words[-1].count('WRITE') == 0:
# j+=1
# continue
## save to list
op = words[op_index].upper();
acc = Access(words)
if acc.size >= 1:
if op.count('READ')>0 or op == 'R':
debugPrint("one READ")
rlist.append(acc)
total_read_count += 1
total_read_time += acc.endTime - acc.startTime
if op.count('WRITE')>0 or op == 'W':
debugPrint("one WRITE")
wlist.append(acc)
total_write_count += 1
total_write_time += acc.endTime - acc.startTime
        # Finished reading a batch of up to sig._range lines: generate the
        # CSV files from rlist and wlist. The output files are opened in
        # append mode because batches are processed one at a time.
if (len(rlist) > 0):
rlist.toIORStep(trace_filename, 'r') # 'r' for read
rlist.toDataAccessHoleSizes(trace_filename, 'r')
rlistEmpty = 0
if (len(wlist) > 0):
wlist.toIORStep(trace_filename, 'w') # 'w' for write
wlistEmpty = 0
# empty the two lists
rlist = AccList()
wlist = AccList()
gc.collect() # garbage collection
# reached EOF? exit the "while 1" loop
if eof == 1:
break
## close the opened file
f.close()
if (rlistEmpty == 1):
readF = open( os.path.join(sig._out_path, trace_filename + ".read.rate.csv"), 'a+')
readF.write( "{0},{1}\n".format(0, 0) )
readF.close()
readF = open( os.path.join(sig._out_path, trace_filename + ".read.hole.sizes.csv"), 'a+')
readF.write( "{0},{1}\n".format(0, 0) )
readF.close()
if (wlistEmpty == 1):
writeF = open( os.path.join(sig._out_path, trace_filename + ".write.rate.csv"), 'a+')
writeF.write( "{0},{1}\n".format(0, 0) )
writeF.close()
# TODO: gnuplot for read and write rates
# save the statistics information to files
output = os.path.join(sig._out_path, trace_filename + ".stat.properties")
f = open(output, 'a+')
f.write("total_read_time: {0}\n".format(total_read_time))
f.write("total_read_count: {0}\n".format(total_read_count))
f.write("total_write_time: {0}\n".format(total_write_time))
f.write("total_write_count: {0}\n".format(total_write_count))
#f.write("global_total_read_time: {0}\n".format(sig._total_read_time))
#f.write("global_total_write_time: {0}\n".format(sig._total_write_time))
| yinyanlong/iosig | src/analysis/single_trace_analysis.py | Python | bsd-3-clause | 8008 |
# -*- coding: utf-8 -*-
#
# __init__.py
# cjktools
#
"""
This package contains various tools for Japanese NLP tasks, although some
may be applicable to any Python project. See the documentation of each module for
details.
"""
__all__ = [
'alternations',
'common',
'enum',
'exceptions',
'kana_table',
'maps',
'scripts',
'smart_cache',
'resources',
]
| larsyencken/cjktools | cjktools/__init__.py | Python | bsd-3-clause | 385 |
import errno
import os
import types
import typing as t
from werkzeug.utils import import_string
class ConfigAttribute:
"""Makes an attribute forward to the config"""
def __init__(self, name: str, get_converter: t.Optional[t.Callable] = None) -> None:
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj: t.Any, owner: t.Any = None) -> t.Any:
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj: t.Any, value: t.Any) -> None:
obj.config[self.__name__] = value
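# A minimal usage sketch for ConfigAttribute; ``App`` below is a made-up
# stand-in for an object exposing a ``config`` mapping, not a Flask API:
#
#     class App:
#         config = {"DEBUG": "1"}
#         debug = ConfigAttribute("DEBUG", get_converter=bool)
#
#     App().debug  # reads App().config["DEBUG"] and applies the converter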
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
    On Windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path: str, defaults: t.Optional[dict] = None) -> None:
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to ``True`` if you want silent failure for missing
files.
:return: bool. ``True`` if able to load config, ``False`` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError(
f"The environment variable {variable_name!r} is not set"
" and as such configuration could not be loaded. Set"
" this variable and make it point to a configuration"
" file"
)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename: str, silent: bool = False) -> bool:
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to ``True`` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = types.ModuleType("config")
d.__file__ = filename
try:
with open(filename, mode="rb") as config_file:
exec(compile(config_file.read(), filename, "exec"), d.__dict__)
except OSError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
return False
e.strerror = f"Unable to load configuration file ({e.strerror})"
raise
self.from_object(d)
return True
def from_object(self, obj: t.Union[object, str]) -> None:
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes. :meth:`from_object`
loads only the uppercase attributes of the module/class. A ``dict``
object will not work with :meth:`from_object` because the keys of a
``dict`` are not attributes of the ``dict`` class.
Example of module-based configuration::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
Nothing is done to the object before loading. If the object is a
class and has ``@property`` attributes, it needs to be
instantiated before being passed to this method.
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
See :ref:`config-dev-prod` for an example of class-based configuration
using :meth:`from_object`.
:param obj: an import name or object
"""
if isinstance(obj, str):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def from_file(
self,
filename: str,
load: t.Callable[[t.IO[t.Any]], t.Mapping],
silent: bool = False,
) -> bool:
"""Update the values in the config from a file that is loaded
using the ``load`` parameter. The loaded data is passed to the
:meth:`from_mapping` method.
.. code-block:: python
import toml
app.config.from_file("config.toml", load=toml.load)
:param filename: The path to the data file. This can be an
absolute path or relative to the config root path.
:param load: A callable that takes a file handle and returns a
mapping of loaded data from the file.
:type load: ``Callable[[Reader], Mapping]`` where ``Reader``
implements a ``read`` method.
:param silent: Ignore the file if it doesn't exist.
.. versionadded:: 2.0
"""
filename = os.path.join(self.root_path, filename)
try:
with open(filename) as f:
obj = load(f)
except OSError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = f"Unable to load configuration file ({e.strerror})"
raise
return self.from_mapping(obj)
def from_json(self, filename: str, silent: bool = False) -> bool:
"""Update the values in the config from a JSON file. The loaded
data is passed to the :meth:`from_mapping` method.
:param filename: The path to the JSON file. This can be an
absolute path or relative to the config root path.
:param silent: Ignore the file if it doesn't exist.
.. deprecated:: 2.0.0
Will be removed in Flask 2.1. Use :meth:`from_file` instead.
This was removed early in 2.0.0, was added back in 2.0.1.
.. versionadded:: 0.11
"""
import warnings
from . import json
warnings.warn(
"'from_json' is deprecated and will be removed in Flask"
" 2.1. Use 'from_file(path, json.load)' instead.",
DeprecationWarning,
stacklevel=2,
)
return self.from_file(filename, json.load, silent=silent)
def from_mapping(
self, mapping: t.Optional[t.Mapping[str, t.Any]] = None, **kwargs: t.Any
) -> bool:
"""Updates the config like :meth:`update` ignoring items with non-upper
keys.
.. versionadded:: 0.11
"""
mappings: t.Dict[str, t.Any] = {}
if mapping is not None:
mappings.update(mapping)
mappings.update(kwargs)
for key, value in mappings.items():
if key.isupper():
self[key] = value
return True
def get_namespace(
self, namespace: str, lowercase: bool = True, trim_namespace: bool = True
) -> t.Dict[str, t.Any]:
"""Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage::
app.config['IMAGE_STORE_TYPE'] = 'fs'
app.config['IMAGE_STORE_PATH'] = '/var/app/images'
app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
image_store_config = app.config.get_namespace('IMAGE_STORE_')
The resulting dictionary `image_store_config` would look like::
{
'type': 'fs',
'path': '/var/app/images',
'base_url': 'http://img.website.com'
}
This is often useful when configuration options map directly to
keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
.. versionadded:: 0.11
"""
rv = {}
for k, v in self.items():
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace) :]
else:
key = k
if lowercase:
key = key.lower()
rv[key] = v
return rv
def __repr__(self) -> str:
return f"<{type(self).__name__} {dict.__repr__(self)}>"
| mitsuhiko/flask | src/flask/config.py | Python | bsd-3-clause | 11068 |
from sympy.core.numbers import comp, Rational
from sympy.physics.optics.utils import (refraction_angle, fresnel_coefficients,
deviation, brewster_angle, critical_angle, lens_makers_formula,
mirror_formula, lens_formula, hyperfocal_distance,
transverse_magnification)
from sympy.physics.optics.medium import Medium
from sympy.physics.units import e0
from sympy import symbols, sqrt, Matrix, oo
from sympy.geometry.point import Point3D
from sympy.geometry.line import Ray3D
from sympy.geometry.plane import Plane
from sympy.utilities.pytest import raises
ae = lambda a, b, n: comp(a, b, 10**-n)
def test_refraction_angle():
n1, n2 = symbols('n1, n2')
m1 = Medium('m1')
m2 = Medium('m2')
r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
i = Matrix([1, 1, 1])
n = Matrix([0, 0, 1])
normal_ray = Ray3D(Point3D(0, 0, 0), Point3D(0, 0, 1))
P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
assert refraction_angle(r1, 1, 1, n) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle([1, 1, 1], 1, 1, n) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle((1, 1, 1), 1, 1, n) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(i, 1, 1, [0, 0, 1]) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(i, 1, 1, (0, 0, 1)) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(i, 1, 1, normal_ray) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(i, 1, 1, plane=P) == Matrix([
[ 1],
[ 1],
[-1]])
assert refraction_angle(r1, 1, 1, plane=P) == \
Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
assert refraction_angle(r1, m1, 1.33, plane=P) == \
Ray3D(Point3D(0, 0, 0), Point3D(Rational(100, 133), Rational(100, 133), -789378201649271*sqrt(3)/1000000000000000))
assert refraction_angle(r1, 1, m2, plane=P) == \
Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
assert refraction_angle(r1, n1, n2, plane=P) == \
Ray3D(Point3D(0, 0, 0), Point3D(n1/n2, n1/n2, -sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)))
assert refraction_angle(r1, 1.33, 1, plane=P) == 0 # TIR
assert refraction_angle(r1, 1, 1, normal_ray) == \
Ray3D(Point3D(0, 0, 0), direction_ratio=[1, 1, -1])
assert ae(refraction_angle(0.5, 1, 2), 0.24207, 5)
assert ae(refraction_angle(0.5, 2, 1), 1.28293, 5)
raises(ValueError, lambda: refraction_angle(r1, m1, m2, normal_ray, P))
raises(TypeError, lambda: refraction_angle(m1, m1, m2)) # can add other values for arg[0]
raises(TypeError, lambda: refraction_angle(r1, m1, m2, None, i))
raises(TypeError, lambda: refraction_angle(r1, m1, m2, m2))
def test_fresnel_coefficients():
assert all(ae(i, j, 5) for i, j in zip(
fresnel_coefficients(0.5, 1, 1.33),
[0.11163, -0.17138, 0.83581, 0.82862]))
assert all(ae(i, j, 5) for i, j in zip(
fresnel_coefficients(0.5, 1.33, 1),
[-0.07726, 0.20482, 1.22724, 1.20482]))
m1 = Medium('m1')
m2 = Medium('m2', n=2)
assert all(ae(i, j, 5) for i, j in zip(
fresnel_coefficients(0.3, m1, m2),
[0.31784, -0.34865, 0.65892, 0.65135]))
ans = [[-0.23563, -0.97184], [0.81648, -0.57738]]
got = fresnel_coefficients(0.6, m2, m1)
for i, j in zip(got, ans):
for a, b in zip(i.as_real_imag(), j):
assert ae(a, b, 5)
def test_deviation():
n1, n2 = symbols('n1, n2')
r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
n = Matrix([0, 0, 1])
i = Matrix([-1, -1, -1])
normal_ray = Ray3D(Point3D(0, 0, 0), Point3D(0, 0, 1))
P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
assert deviation(r1, 1, 1, normal=n) == 0
assert deviation(r1, 1, 1, plane=P) == 0
assert deviation(r1, 1, 1.1, plane=P).evalf(3) + 0.119 < 1e-3
assert deviation(i, 1, 1.1, normal=normal_ray).evalf(3) + 0.119 < 1e-3
assert deviation(r1, 1.33, 1, plane=P) is None # TIR
assert deviation(r1, 1, 1, normal=[0, 0, 1]) == 0
assert deviation([-1, -1, -1], 1, 1, normal=[0, 0, 1]) == 0
assert ae(deviation(0.5, 1, 2), -0.25793, 5)
assert ae(deviation(0.5, 2, 1), 0.78293, 5)
def test_brewster_angle():
m1 = Medium('m1', n=1)
m2 = Medium('m2', n=1.33)
assert ae(brewster_angle(m1, m2), 0.93, 2)
m1 = Medium('m1', permittivity=e0, n=1)
m2 = Medium('m2', permittivity=e0, n=1.33)
assert ae(brewster_angle(m1, m2), 0.93, 2)
assert ae(brewster_angle(1, 1.33), 0.93, 2)
def test_critical_angle():
m1 = Medium('m1', n=1)
m2 = Medium('m2', n=1.33)
assert ae(critical_angle(m2, m1), 0.85, 2)
def test_lens_makers_formula():
n1, n2 = symbols('n1, n2')
m1 = Medium('m1', permittivity=e0, n=1)
m2 = Medium('m2', permittivity=e0, n=1.33)
assert lens_makers_formula(n1, n2, 10, -10) == 5*n2/(n1 - n2)
assert ae(lens_makers_formula(m1, m2, 10, -10), -20.15, 2)
assert ae(lens_makers_formula(1.33, 1, 10, -10), 15.15, 2)
def test_mirror_formula():
u, v, f = symbols('u, v, f')
assert mirror_formula(focal_length=f, u=u) == f*u/(-f + u)
assert mirror_formula(focal_length=f, v=v) == f*v/(-f + v)
assert mirror_formula(u=u, v=v) == u*v/(u + v)
assert mirror_formula(u=oo, v=v) == v
assert mirror_formula(u=oo, v=oo) is oo
assert mirror_formula(focal_length=oo, u=u) == -u
assert mirror_formula(u=u, v=oo) == u
assert mirror_formula(focal_length=oo, v=oo) is oo
assert mirror_formula(focal_length=f, v=oo) == f
assert mirror_formula(focal_length=oo, v=v) == -v
assert mirror_formula(focal_length=oo, u=oo) is oo
assert mirror_formula(focal_length=f, u=oo) == f
assert mirror_formula(focal_length=oo, u=u) == -u
raises(ValueError, lambda: mirror_formula(focal_length=f, u=u, v=v))
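
# Exposition (added): the asserts above all rearrange the mirror equation
# 1/u + 1/v = 1/f: v = f*u/(u - f) when f and u are given, and
# f = u*v/(u + v) when both conjugates are given.  The oo cases are its
# limits, e.g. an object at infinity (u=oo) images at the focal plane, so
# the result is f.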


def test_lens_formula():
u, v, f = symbols('u, v, f')
assert lens_formula(focal_length=f, u=u) == f*u/(f + u)
assert lens_formula(focal_length=f, v=v) == f*v/(f - v)
assert lens_formula(u=u, v=v) == u*v/(u - v)
assert lens_formula(u=oo, v=v) == v
assert lens_formula(u=oo, v=oo) is oo
assert lens_formula(focal_length=oo, u=u) == u
assert lens_formula(u=u, v=oo) == -u
assert lens_formula(focal_length=oo, v=oo) is -oo
assert lens_formula(focal_length=oo, v=v) == v
assert lens_formula(focal_length=f, v=oo) == -f
assert lens_formula(focal_length=oo, u=oo) is oo
assert lens_formula(focal_length=oo, u=u) == u
assert lens_formula(focal_length=f, u=oo) == f
raises(ValueError, lambda: lens_formula(focal_length=f, u=u, v=v))
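
# Exposition (added): here the underlying relation is the thin-lens equation
# with the sign convention 1/v - 1/u = 1/f, giving v = f*u/(f + u),
# u = f*v/(f - v) and f = u*v/(u - v), hence the sign differences from the
# mirror cases above.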


def test_hyperfocal_distance():
f, N, c = symbols('f, N, c')
assert hyperfocal_distance(f=f, N=N, c=c) == f**2/(N*c)
assert ae(hyperfocal_distance(f=0.5, N=8, c=0.0033), 9.47, 2)
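
# Exposition (added): hyperfocal distance H = f**2/(N*c), as the symbolic
# assert shows; numerically 0.5**2/(8*0.0033) = 0.25/0.0264 ~= 9.47.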


def test_transverse_magnification():
si, so = symbols('si, so')
assert transverse_magnification(si, so) == -si/so
assert transverse_magnification(30, 15) == -2
| kaushik94/sympy | sympy/physics/optics/tests/test_utils.py | Python | bsd-3-clause | 7,792 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a layer of abstraction for the issue tracker API."""
import logging
from apiclient import discovery
from apiclient import errors
import httplib2
_DISCOVERY_URI = ('https://monorail-prod.appspot.com'
'/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest')


class IssueTrackerService(object):
"""Class for updating bug issues."""

  def __init__(self, http=None, additional_credentials=None):
"""Initializes an object for adding and updating bugs on the issue tracker.
This object can be re-used to make multiple requests without calling
    apiclient.discovery.build multiple times.
This class makes requests to the Monorail API.
API explorer: https://goo.gl/xWd0dX
Args:
http: A Http object to pass to request.execute; this should be an
Http object that's already authenticated via OAuth2.
additional_credentials: A credentials object, e.g. an instance of
oauth2client.client.SignedJwtAssertionCredentials. This includes
the email and secret key of a service account.
"""
self._http = http or httplib2.Http()
if additional_credentials:
additional_credentials.authorize(self._http)
self._service = discovery.build(
'monorail', 'v1', discoveryServiceUrl=_DISCOVERY_URI,
http=self._http)

  def AddBugComment(self, bug_id, comment, status=None, cc_list=None,
merge_issue=None, labels=None, owner=None):
"""Adds a comment with the bisect results to the given bug.
Args:
bug_id: Bug ID of the issue to update.
comment: Bisect results information.
status: A string status for bug, e.g. Assigned, Duplicate, WontFix, etc.
cc_list: List of email addresses of users to add to the CC list.
merge_issue: ID of the issue to be merged into; specifying this option
implies that the status should be "Duplicate".
labels: List of labels for bug.
owner: Owner of the bug.
Returns:
True if successful, False otherwise.
"""
if not bug_id or bug_id < 0:
return False
body = {'content': comment}
updates = {}
# Mark issue as duplicate when relevant bug ID is found in the datastore.
# Avoid marking an issue as duplicate of itself.
if merge_issue and int(merge_issue) != bug_id:
status = 'Duplicate'
updates['mergedInto'] = merge_issue
logging.info('Bug %s marked as duplicate of %s', bug_id, merge_issue)
if status:
updates['status'] = status
if cc_list:
updates['cc'] = cc_list
if labels:
updates['labels'] = labels
if owner:
updates['owner'] = owner
body['updates'] = updates
return self._MakeCommentRequest(bug_id, body)
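
  # Illustrative usage (added; values and `authorized_http` are hypothetical --
  # the caller must supply an OAuth2-authorized httplib2.Http object):
  #
  #   service = IssueTrackerService(http=authorized_http)
  #   service.AddBugComment(12345, 'Bisect job completed.', status='Assigned',
  #                         cc_list=['user@chromium.org'], labels=['Type-Bug'])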

  def List(self, **kwargs):
"""Make a request to the issue tracker to list bugs."""
request = self._service.issues().list(projectId='chromium', **kwargs)
return self._ExecuteRequest(request)

  def _MakeCommentRequest(self, bug_id, body):
"""Make a request to the issue tracker to update a bug."""
request = self._service.issues().comments().insert(
projectId='chromium',
issueId=bug_id,
body=body)
response = self._ExecuteRequest(request)
if not response:
logging.error('Error updating bug %s with body %s', bug_id, body)
return False
return True

  def NewBug(self, title, description, labels=None, components=None,
owner=None):
"""Creates a new bug.
Args:
title: The short title text of the bug.
description: The body text for the bug.
labels: Starting labels for the bug.
components: Starting components for the bug.
owner: Starting owner account name.
Returns:
The new bug ID if successfully created, or None.
"""
body = {
'title': title,
'summary': title,
'description': description,
'labels': labels or [],
'components': components or [],
'status': 'Assigned',
}
if owner:
body['owner'] = {'name': owner}
return self._MakeCreateRequest(body)
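
  # Illustrative usage (added; values are hypothetical):
  #
  #   bug_id = service.NewBug('Perf regression in sunspider',
  #                           'Details of the regression...',
  #                           labels=['Type-Bug-Regression'],
  #                           owner='user@chromium.org')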

  def _MakeCreateRequest(self, body):
"""Makes a request to create a new bug.
Args:
body: The request body parameter dictionary.
Returns:
A bug ID if successful, or None otherwise.
"""
request = self._service.issues().insert(projectId='chromium', body=body)
response = self._ExecuteRequest(request)
if response and 'id' in response:
return response['id']
return None

  def GetLastBugCommentsAndTimestamp(self, bug_id):
"""Gets last updated comments and timestamp in the given bug.
Args:
bug_id: Bug ID of the issue to update.
Returns:
A dictionary with last comment and timestamp, or None on failure.
"""
if not bug_id or bug_id < 0:
return None
response = self._MakeGetCommentsRequest(bug_id)
if response and all(v in response.keys()
for v in ['totalResults', 'items']):
bug_comments = response.get('items')[response.get('totalResults') - 1]
if bug_comments.get('content') and bug_comments.get('published'):
return {
'comment': bug_comments.get('content'),
'timestamp': bug_comments.get('published')
}
return None
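
  # On success the return value has the shape built above, e.g. (hypothetical
  # data):
  #   {'comment': '=== BISECT JOB RESULTS ===...',
  #    'timestamp': '2015-06-29T20:45:16'}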

  def _MakeGetCommentsRequest(self, bug_id):
"""Make a request to the issue tracker to get comments in the bug."""
    # TODO(prasadv): By default the max number of comments retrieved in
    # one request is 100. Since bisect-fyi jobs may have more than 100
    # comments, we set maxResults to 10000 for now. Remove this max count
    # once we find a way to clear old comments on FYI issues.
request = self._service.issues().comments().list(
projectId='chromium',
issueId=bug_id,
maxResults=10000)
return self._ExecuteRequest(request)

  def _ExecuteRequest(self, request):
"""Make a request to the issue tracker.
Args:
      request: The request object, which has an execute method.
Returns:
The response if there was one, or else None.
"""
try:
response = request.execute(http=self._http)
return response
except errors.HttpError as e:
logging.error(e)
return None
| SummerLW/Perf-Insight-Report | dashboard/dashboard/issue_tracker_service.py | Python | bsd-3-clause | 6,428 |
# -*- coding: utf-8 -*-
from flask import render_template, redirect, url_for, flash, abort
from purchasing.decorators import requires_roles
from purchasing.data.stages import Stage
from purchasing.data.flows import Flow
from purchasing.conductor.forms import FlowForm, NewFlowForm
from purchasing.conductor.manager import blueprint


@blueprint.route('/flow/new', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def new_flow():
'''Create a new flow
:status 200: Render the new flow template
:status 302: Try to create a new flow using the
:py:class:`~purchasing.conductor.forms.NewFlowForm`, redirect
to the flows list view if successful
'''
stages = Stage.choices_factory()
form = NewFlowForm(stages=stages)
if form.validate_on_submit():
stage_order = []
for entry in form.stage_order.entries:
# try to evaluate the return value as an ID
try:
stage_id = int(entry.data)
# otherwise it's a new stage
except ValueError:
new_stage = Stage.create(name=entry.data)
stage_id = new_stage.id
stage_order.append(stage_id)
Flow.create(flow_name=form.flow_name.data, stage_order=stage_order)
flash('Flow created successfully!', 'alert-success')
return redirect(url_for('conductor.flows_list'))
return render_template('conductor/flows/new.html', stages=stages, form=form)
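
# Illustrative example (added; the form data is hypothetical): stage_order
# entries like ['3', 'Legal Review', '7'] yield stage_order == [3, <id of the
# newly created 'Legal Review' stage>, 7]: integer-like entries are treated as
# existing Stage ids, and anything else creates a new Stage via
# Stage.create(name=...).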


@blueprint.route('/flows')
@requires_roles('conductor', 'admin', 'superadmin')
def flows_list():
'''List all flows
:status 200: Render the all flows list template
'''
flows = Flow.query.order_by(Flow.flow_name).all()
active, archived = [], []
for flow in flows:
if flow.is_archived:
archived.append(flow)
else:
active.append(flow)
return render_template('conductor/flows/browse.html', active=active, archived=archived)


@blueprint.route('/flow/<int:flow_id>', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def flow_detail(flow_id):
'''View/edit a flow's details
:status 200: Render the flow edit template
    :status 302: Post changes to a flow using the submitted
:py:class:`~purchasing.conductor.forms.FlowForm`, redirect back to
the current flow's detail page if successful
'''
flow = Flow.query.get(flow_id)
if flow:
form = FlowForm(obj=flow)
if form.validate_on_submit():
flow.update(
flow_name=form.data['flow_name'],
is_archived=form.data['is_archived']
)
flash('Flow successfully updated', 'alert-success')
return redirect(url_for('conductor.flow_detail', flow_id=flow.id))
return render_template('conductor/flows/edit.html', form=form, flow=flow)
abort(404)
| codeforamerica/pittsburgh-purchasing-suite | purchasing/conductor/manager/flow_management.py | Python | bsd-3-clause | 2,911 |