code (string) | repo_name (string) | path (string) | language (string) | license (string) | size (int64)
---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8-*-
# ------------------------------------------------------------------------------
# Copyright (c) 2007-2021, Ricardo Amézquita Orozco
# All rights reserved.
#
# This software is provided without warranty under the terms of the GPLv3
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license.
#
#
# Author: Ricardo Amézquita Orozco
# Description: Material definition helper class
# Symbols Defined: Material
#
#
# ------------------------------------------------------------------------------
'''
Material class definition, and helper functions used to load the
constants of the dispersion formula to be used in the calculation of
the refraction index.
It uses the database from https://refractiveindex.info
'''
from os import walk
from os.path import join, expanduser, relpath
from pkg_resources import resource_filename
from .mat_eq import from_yml, ModelNotImplemented
from configparser import ConfigParser
mat_config = resource_filename("pyoptools.raytrace.mat_lib", 'data')
# mat_config = "../mat_lib/data/"
# Get library names from the system
# Get data from glass folder
libnames = []
libpath = join(mat_config, "glass")
for (dirpath, dirnames, filenames) in walk(libpath):
library = relpath(dirpath, libpath)
# Exclude some names that are not libraries
if library in [".", ]:
continue
libnames.append((relpath(dirpath, libpath)).replace("/", "_"))
# Get data from main folder
#mainlibnames = []
#mainlibpath = join(mat_config, "main")
#for (dirpath, dirnames, filenames) in walk(mainlibpath):
# library = relpath(dirpath, mainlibpath)
# # Exclude some names that are not libraries
# if library in [".", ]:
# continue
# mainlibnames.append((relpath(dirpath, mainlibpath)).replace("/", "_"))
# Get library names from the user home
homelibpath = join(expanduser("~"), ".pyoptools", "material", "glass")
homelibnames = []
for (dirpath, dirnames, filenames) in walk(homelibpath):
library = relpath(dirpath, homelibpath)
if library in [".", ]:
continue
homelibnames.append((relpath(dirpath, homelibpath)).replace("/", "_"))
# Create the materials dictionary
# Note: If a home library has the same name as a system library, all the
# glasses defined will be merged in the same library
libset = list(set(libnames+["main"]+homelibnames))
liblist = []
for libname in libset:
# Create the dictionaries where the materials will be saved. One dictionary
# per library,
globals()[libname] = {}
liblist.append((libname, globals()[libname]))
liblist.sort()
# Fill the dictionaries with the current materials system wide, and then with
# the materials defined in the home of the user
for npath in [libpath, homelibpath]:
for (dirpath, dirnames, filenames) in walk(npath):
library = (relpath(dirpath, npath)).replace("/","_")
# Exclude some names that are not libraries
if library in [".", ]:
continue
for name in filenames:
try:
matname = name.split(".")[0]
globals()[library][matname] = from_yml(join(dirpath, name))
except ModelNotImplemented:
continue
# Fill the main library. The main folder is interpreted as a catalog, and each
# material plus its different models are listed as material instances.
# This was done to keep the list of catalogs short. If needed, an alias can be
# created.
npath = join(mat_config, "main")
for (dirpath, dirnames, filenames) in walk(npath):
library = (relpath(dirpath, npath)).replace("/","_")
# Exclude some names that are not libraries
if library in [".", ]:
continue
for name in filenames:
try:
matname = name.split(".")[0]
globals()["main"][library+"_"+matname] = from_yml(join(dirpath, name))
except ModelNotImplemented:
continue
# Create the aliases material library. It will read the information from the
# aliases.cfg file
aliases_path = join(mat_config,"aliases.cfg")
globals()["aliases"] = {}
config = ConfigParser()
config.read(aliases_path)
for i in config:
if i == "DEFAULT": continue
libr = config[i]["library"]
mate = config[i]["material"]
globals()["aliases"][i] = globals()[libr][mate]
liblist.append(("aliases", globals()["aliases"]))
def find_material(material):
"""Search for a material in all the libraries
This function prints all the libraries that contain the material
Arguments:
material
String with the material name
"""
retv=[]
for libn, _ in liblist:
if material in globals()[libn]:
retv.append(libn)
return retv
def get_material(material):
"""Search for a material in all the libraries
This function search in all the material libraries, and return the
first instance found, that matches the name of the material requested.
If no material found, returns None
Arguments:
material
String with the material name
"""
for libn, _ in liblist:
tdict=globals()[libn]
if material in tdict:
return tdict[material]
    print(material, " not found")
    raise KeyError(material)
def mat_list():
for libn, _ in liblist:
tdict = globals()[libn]
print(libn, tdict.keys())
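# Usage sketch (illustrative; assumes a glass such as "N-BK7" is present in the
# bundled refractiveindex.info data):
#
#     from pyoptools.raytrace.mat_lib import material
#     libs = material.find_material("N-BK7")   # libraries that define the glass
#     bk7 = material.get_material("N-BK7")     # first matching material instance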
| cihologramas/pyoptools | pyoptools/raytrace/mat_lib/material.py | Python | gpl-3.0 | 5,423 |
from django.contrib import admin
class CandidateAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
'fields': ['email', 'first_name', 'last_name', 'gender', 'cv']
}),
('Contact Information', {
'classes': ('collapse',),
'fields': ['mobile_phone']
}),
('Address Information', {
'classes': ('collapse',),
'fields': ['address', 'city']
}),
('Additional Information', {
'classes': ('collapse',),
'fields': ['qualification', 'institute', 'experienced']
})
]
def get_fieldsets(self, request, obj=None):
if obj is None:
self.fieldsets[0][1]['fields'] = ['email', 'first_name',
'last_name', 'gender', 'cv']
else:
self.fieldsets[0][1]['fields'] = ['email', 'first_name',
'last_name', 'gender', 'cv',
'status']
return self.fieldsets
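# Registration sketch (the Candidate model import path is assumed for
# illustration; adjust to the actual app layout):
#
#     from interview.models import Candidate
#     admin.site.register(Candidate, CandidateAdmin)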
| QC-Technologies/HRMS | interview/admin/candidate.py | Python | gpl-3.0 | 1,067 |
from setuptools import setup, find_packages
setup(
name='vmpie',
version='0.1a',
packages=find_packages(),
author='',
entry_points={
'vmpie.subsystems':
[
'VCenter = vmpie.vcenter:VCenter'
],
'console_scripts':
[
'vmplugin = vmpie.vmplugin:main'
]
},
install_requires=[
'pyVmomi',
'requests',
'six>=1.7.3',
# FIXME: pyvmoni-tools is not in the PyPI and therefore cannot be a dependency.
# 'pyvmomi_tools',
'Pyro4',
'urllib3'
]
)
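# Install/usage sketch (run from the repository root; illustrative only):
#
#     pip install .
#     vmplugin --help   # console script declared in entry_points above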
| LevyCory/vmpie | setup.py | Python | gpl-3.0 | 614 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestSoilTexture(unittest.TestCase):
def test_texture_selection(self):
soil_tex = frappe.get_all('Soil Texture', fields=['name'], filters={'collection_datetime': '2017-11-08'})
doc = frappe.get_doc('Soil Texture', soil_tex[0].name)
self.assertEquals(doc.silt_composition, 50)
self.assertEquals(doc.soil_type, 'Silt Loam') | indictranstech/erpnext | erpnext/agriculture/doctype/soil_texture/test_soil_texture.py | Python | agpl-3.0 | 518 |
#!/usr/bin/env python
""" This script is an example of why I care so much about Mozharness' 2nd core
concept, logging. http://escapewindow.dreamwidth.org/230853.html
"""
import os
import shutil
#print "downloading foo.tar.bz2..."
os.system("curl -s -o foo.tar.bz2 http://people.mozilla.org/~asasaki/foo.tar.bz2")
#os.system("curl -v -o foo.tar.bz2 http://people.mozilla.org/~asasaki/foo.tar.bz2")
#os.rename("foo.tar.bz2", "foo3.tar.bz2")
os.system("tar xjf foo.tar.bz2")
#os.chdir("x")
os.remove("x/ship2")
os.remove("foo.tar.bz2")
os.system("tar cjf foo.tar.bz2 x")
shutil.rmtree("x")
#os.system("scp -q foo.tar.bz2 people.mozilla.org:public_html/foo2.tar.bz2")
os.remove("foo.tar.bz2")
| Yukarumya/Yukarum-Redfoxes | testing/mozharness/examples/silent_script.py | Python | mpl-2.0 | 694 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__author__ = "wakita181009"
__author_email__ = "[email protected]"
__version__ = "0.0.4"
__license__ = "MIT"
| wakita181009/slack-api-utils | slack_api_utils/__init__.py | Python | mit | 180 |
from django.db.models.lookups import Lookup
class HashLookup(Lookup):
"""Lookup to filter hashed values.
`HashLookup` is hashing the value on the right hand side with
the function specified in `encrypt_sql`.
"""
lookup_name = 'hash_of'
def as_sql(self, qn, connection):
"""Responsible for creating the lookup with the digest SQL.
Modify the right hand side expression to compare the value passed
to a hash.
"""
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
rhs = self.lhs.field.encrypt_sql % rhs
return ('{}::bytea = {}'.format(lhs, rhs)), params
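# Usage sketch (assumes a model whose field defines `encrypt_sql`, e.g. one of
# the pgcrypto hash fields; model and field names are illustrative):
#
#     DiaryEntry.objects.filter(secret__hash_of='plaintext value')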
| incuna/django-pgcrypto-fields | pgcrypto/lookups.py | Python | bsd-2-clause | 737 |
#!/usr/bin/env python
# reference variable to Storyboard Omnet++ module: board
import storyboard
import timeline
print ("demo.py successfully imported...")
def createStories(board):
# Create coordinates needed for the PolygonCondition
coord0 = storyboard.Coord(0.0, 0.0)
coord1 = storyboard.Coord(3000.0, 0.0)
coord2 = storyboard.Coord(3000.0, 1600.0)
coord3 = storyboard.Coord(0.0, 1600.0)
# Create PolygonCondition
cond0 = storyboard.PolygonCondition([coord0, coord1, coord2, coord3])
# Create TimeCondition
cond1 = storyboard.TimeCondition(timeline.milliseconds(15000))
# Create CarSetCondition
cond2 = storyboard.CarSetCondition({"flow1.0", "flow0.1", "flow0.2"})
# Create SpeedEffect
effect0 = storyboard.SpeedEffect(2.44)
# Create AndConditions
and0 = storyboard.AndCondition(cond0, cond1)
and1 = storyboard.AndCondition(and0, cond2)
# Create OrCondition
cond3 = storyboard.TimeCondition(timeline.seconds(190))
or0 = storyboard.OrCondition(cond3, and1)
# Create Story
story = storyboard.Story(or0, [effect0])
# Create Story 2
cond4 = storyboard.TimeCondition(timeline.seconds(50), timeline.seconds(60))
effect1 = storyboard.SpeedEffect(2.44)
story1 = storyboard.Story(cond4, [effect1])
# Create Story 3, overlapping story0
cond5 = storyboard.TimeCondition(timeline.seconds(200), timeline.seconds(210))
cond6 = storyboard.CarSetCondition({"flow0.0", "flow0.1"})
and2 = storyboard.AndCondition(cond5, cond6)
effect2 = storyboard.SpeedEffect(0.1)
story2 = storyboard.Story(and2, [effect2])
# Create Story 4, SpeedConditionGreater
cond7 = storyboard.SpeedConditionGreater(4.0)
cond8 = storyboard.TimeCondition(timeline.seconds(20), timeline.seconds(30))
and3 = storyboard.AndCondition(cond7, cond8)
effect3 = storyboard.SpeedEffect(1.0)
story3 = storyboard.Story(and3, [effect3])
# Register Stories at the Storyboard
board.registerStory(story)
board.registerStory(story1)
board.registerStory(story2)
board.registerStory(story3)
print("Stories loaded!")
| riebl/artery | scenarios/storyboard/demo.py | Python | gpl-2.0 | 2,014 |
from django.conf.urls import url, include
from . import views
app_name = 'accounts'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^update/$', views.update, name='update'),
url(r'^register/$', views.register, name='register'),
]
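# Inclusion sketch (this goes in the project-level urls.py, which is assumed
# here; `app_name` above provides the 'accounts' namespace):
#
#     url(r'^accounts/', include('accounts.urls'))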
| yayoiukai/signalserver | accounts/urls.py | Python | mit | 257 |
def main():
with open('file.txt'):
print(42) | smmribeiro/intellij-community | python/testData/quickFixes/PyRemoveUnusedLocalQuickFixTest/withOneTarget_after.py | Python | apache-2.0 | 56 |
# coding: utf-8
"""
Katib
Swagger description for Katib # noqa: E501
The version of the OpenAPI document: v1beta1-0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubeflow.katib.configuration import Configuration
class V1beta1Observation(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'metrics': 'list[V1beta1Metric]'
}
attribute_map = {
'metrics': 'metrics'
}
def __init__(self, metrics=None, local_vars_configuration=None): # noqa: E501
"""V1beta1Observation - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._metrics = None
self.discriminator = None
if metrics is not None:
self.metrics = metrics
@property
def metrics(self):
"""Gets the metrics of this V1beta1Observation. # noqa: E501
Key-value pairs for metric names and values # noqa: E501
:return: The metrics of this V1beta1Observation. # noqa: E501
:rtype: list[V1beta1Metric]
"""
return self._metrics
@metrics.setter
def metrics(self, metrics):
"""Sets the metrics of this V1beta1Observation.
Key-value pairs for metric names and values # noqa: E501
:param metrics: The metrics of this V1beta1Observation. # noqa: E501
:type: list[V1beta1Metric]
"""
self._metrics = metrics
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1Observation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1Observation):
return True
return self.to_dict() != other.to_dict()
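# Usage sketch (illustrative; the V1beta1Metric import path mirrors this
# module's layout and is assumed):
#
#     from kubeflow.katib.models.v1beta1_metric import V1beta1Metric
#     obs = V1beta1Observation(metrics=[V1beta1Metric(name="accuracy", latest="0.99")])
#     print(obs.to_dict())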
| kubeflow/katib | sdk/python/v1beta1/kubeflow/katib/models/v1beta1_observation.py | Python | apache-2.0 | 3,470 |
from random import shuffle
from clusto.drivers import Controller
import libvirt
class VMController(Controller):
@classmethod
def allocate(cls, pool, namemanager, ipmanager, memory, disk, swap, storage_pool='vol0'):
'''
Allocate a new VM running on a server in the given pool with enough
free memory. The new VM will be assigned a name from the given
namemanager.
Memory is specified in megabytes (MB)
Swap is specified in megabytes (MB)
Disk is specified in gigabytes (GB)
'''
# Find a suitable server in the pool
host = VMController._find_hypervisor(pool, memory, disk, swap, storage_pool)
# Call libvirt to create the server
vmxml = VMController._xen_create_vm(host, memory, disk, swap, storage_pool)
vm = namemanager.allocate(XenVirtualServer)
vm.from_xml(vmxml)
# Assign an IP to the server object
ipmanager.allocate(vm)
# Return VM object
return vm
@classmethod
def destroy(cls, obj):
        # Call libvirt to destroy the server
        # clusto.deleteEntity(obj.entity)
        pass
@classmethod
def _find_hypervisor(cls, pool, memory, disk, swap, storage_pool):
candidates = pool.contents()
shuffle(candidates)
while True:
if not candidates:
raise Exception('No hypervisor candidates have enough available resources')
server = candidates.pop()
ip = server.get_ips()
if not ip:
continue
conn = libvirt.openReadOnly('xen+tcp://%s' % ip[0])
if not conn:
continue
freedisk = conn.storagePoolLookupByName(storage_pool).info()[3]
if (disk * 1073741824) > freedisk:
continue
freemem = conn.getFreeMemory() / 1048576
            if memory > freemem:
continue
return server
@classmethod
def _xen_create_vm(cls,
| rongoro/clusto | src/clusto/drivers/controllers/VMController.py | Python | bsd-3-clause | 2,014 |
"""Support for MelCloud device sensors."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
from pymelcloud import DEVICE_TYPE_ATA, DEVICE_TYPE_ATW
from pymelcloud.atw_device import Zone
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ENERGY_KILO_WATT_HOUR, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import MelCloudDevice
from .const import DOMAIN
@dataclass
class MelcloudRequiredKeysMixin:
"""Mixin for required keys."""
value_fn: Callable[[Any], float]
enabled: Callable[[Any], bool]
@dataclass
class MelcloudSensorEntityDescription(
SensorEntityDescription, MelcloudRequiredKeysMixin
):
"""Describes Melcloud sensor entity."""
ATA_SENSORS: tuple[MelcloudSensorEntityDescription, ...] = (
MelcloudSensorEntityDescription(
key="room_temperature",
name="Room Temperature",
icon="mdi:thermometer",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
value_fn=lambda x: x.device.room_temperature,
enabled=lambda x: True,
),
MelcloudSensorEntityDescription(
key="energy",
name="Energy",
icon="mdi:factory",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
value_fn=lambda x: x.device.total_energy_consumed,
enabled=lambda x: x.device.has_energy_consumed_meter,
),
)
ATW_SENSORS: tuple[MelcloudSensorEntityDescription, ...] = (
MelcloudSensorEntityDescription(
key="outside_temperature",
name="Outside Temperature",
icon="mdi:thermometer",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
value_fn=lambda x: x.device.outside_temperature,
enabled=lambda x: True,
),
MelcloudSensorEntityDescription(
key="tank_temperature",
name="Tank Temperature",
icon="mdi:thermometer",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
value_fn=lambda x: x.device.tank_temperature,
enabled=lambda x: True,
),
)
ATW_ZONE_SENSORS: tuple[MelcloudSensorEntityDescription, ...] = (
MelcloudSensorEntityDescription(
key="room_temperature",
name="Room Temperature",
icon="mdi:thermometer",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
value_fn=lambda zone: zone.room_temperature,
enabled=lambda x: True,
),
MelcloudSensorEntityDescription(
key="flow_temperature",
name="Flow Temperature",
icon="mdi:thermometer",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
value_fn=lambda zone: zone.flow_temperature,
enabled=lambda x: True,
),
MelcloudSensorEntityDescription(
key="return_temperature",
name="Flow Return Temperature",
icon="mdi:thermometer",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
value_fn=lambda zone: zone.return_temperature,
enabled=lambda x: True,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up MELCloud device sensors based on config_entry."""
mel_devices = hass.data[DOMAIN].get(entry.entry_id)
entities: list[MelDeviceSensor] = [
MelDeviceSensor(mel_device, description)
for description in ATA_SENSORS
for mel_device in mel_devices[DEVICE_TYPE_ATA]
if description.enabled(mel_device)
] + [
MelDeviceSensor(mel_device, description)
for description in ATW_SENSORS
for mel_device in mel_devices[DEVICE_TYPE_ATW]
if description.enabled(mel_device)
]
entities.extend(
[
AtwZoneSensor(mel_device, zone, description)
for mel_device in mel_devices[DEVICE_TYPE_ATW]
for zone in mel_device.device.zones
for description in ATW_ZONE_SENSORS
if description.enabled(zone)
]
)
async_add_entities(entities, True)
class MelDeviceSensor(SensorEntity):
"""Representation of a Sensor."""
entity_description: MelcloudSensorEntityDescription
def __init__(
self,
api: MelCloudDevice,
description: MelcloudSensorEntityDescription,
) -> None:
"""Initialize the sensor."""
self._api = api
self.entity_description = description
self._attr_name = f"{api.name} {description.name}"
self._attr_unique_id = f"{api.device.serial}-{api.device.mac}-{description.key}"
if description.device_class == SensorDeviceClass.ENERGY:
self._attr_state_class = SensorStateClass.TOTAL_INCREASING
else:
self._attr_state_class = SensorStateClass.MEASUREMENT
@property
def native_value(self):
"""Return the state of the sensor."""
return self.entity_description.value_fn(self._api)
async def async_update(self):
"""Retrieve latest state."""
await self._api.async_update()
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
class AtwZoneSensor(MelDeviceSensor):
"""Air-to-Air device sensor."""
def __init__(
self,
api: MelCloudDevice,
zone: Zone,
description: MelcloudSensorEntityDescription,
) -> None:
"""Initialize the sensor."""
if zone.zone_index != 1:
description.key = f"{description.key}-zone-{zone.zone_index}"
super().__init__(api, description)
self._zone = zone
self._attr_name = f"{api.name} {zone.name} {description.name}"
@property
def native_value(self):
"""Return zone based state."""
return self.entity_description.value_fn(self._zone)
| rohitranjan1991/home-assistant | homeassistant/components/melcloud/sensor.py | Python | mit | 6,341 |
MOVEMENT = {
'^': (0, 1),
'v': (0, -1),
'<': (-1, 0),
'>': (1, 0)
}
def add_points(p1, p2):
return (p1[0] + p2[0], p1[1] + p2[1])
def main():
file_data = ''
with open('input.txt', 'r') as f:
file_data = f.read().strip()
positions = [(0, 0), (0, 0)]
visiteds = [[(0, 0)], [(0, 0)]]
who = 0
for instruction in file_data:
positions[who] = add_points(positions[who], MOVEMENT[instruction])
if positions[who] not in visiteds[who]:
visiteds[who].append(positions[who])
who = (who + 1) % 2
true_visited = []
for visited in visiteds:
for position in visited:
if position not in true_visited:
true_visited.append(position)
print(len(true_visited))
if __name__ == '__main__':
main()
| The6P4C/adventofcode | 2015/day3/part2.py | Python | gpl-3.0 | 716 |
"""Vera tests."""
from unittest.mock import MagicMock
import pyvera as pv
from homeassistant.components.light import ATTR_BRIGHTNESS, ATTR_HS_COLOR
from homeassistant.core import HomeAssistant
from .common import ComponentFactory, new_simple_controller_config
async def test_light(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device = MagicMock(spec=pv.VeraDimmer) # type: pv.VeraDimmer
vera_device.device_id = 1
vera_device.vera_device_id = vera_device.device_id
vera_device.comm_failure = False
vera_device.name = "dev1"
vera_device.category = pv.CATEGORY_DIMMER
vera_device.is_switched_on = MagicMock(return_value=False)
vera_device.get_brightness = MagicMock(return_value=0)
vera_device.get_color = MagicMock(return_value=[0, 0, 0])
vera_device.is_dimmable = True
entity_id = "light.dev1_1"
component_data = await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(devices=(vera_device,)),
)
update_callback = component_data.controller_data[0].update_callback
assert hass.states.get(entity_id).state == "off"
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity_id},
)
await hass.async_block_till_done()
vera_device.switch_on.assert_called()
vera_device.is_switched_on.return_value = True
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == "on"
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity_id, ATTR_HS_COLOR: [300, 70]},
)
await hass.async_block_till_done()
vera_device.set_color.assert_called_with((255, 76, 255))
vera_device.is_switched_on.return_value = True
vera_device.get_color.return_value = (255, 76, 255)
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == "on"
assert hass.states.get(entity_id).attributes["hs_color"] == (300.0, 70.196)
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity_id, ATTR_BRIGHTNESS: 55},
)
await hass.async_block_till_done()
vera_device.set_brightness.assert_called_with(55)
vera_device.is_switched_on.return_value = True
vera_device.get_brightness.return_value = 55
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == "on"
assert hass.states.get(entity_id).attributes["brightness"] == 55
await hass.services.async_call(
"light",
"turn_off",
{"entity_id": entity_id},
)
await hass.async_block_till_done()
vera_device.switch_off.assert_called()
vera_device.is_switched_on.return_value = False
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == "off"
| partofthething/home-assistant | tests/components/vera/test_light.py | Python | apache-2.0 | 3,023 |
"""Tests for Vizio init."""
import pytest
from homeassistant.components.media_player.const import DOMAIN as MP_DOMAIN
from homeassistant.components.vizio.const import DOMAIN
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from .const import MOCK_SPEAKER_CONFIG, MOCK_USER_VALID_TV_CONFIG, UNIQUE_ID
from tests.common import MockConfigEntry
async def test_setup_component(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test component setup."""
assert await async_setup_component(
hass, DOMAIN, {DOMAIN: MOCK_USER_VALID_TV_CONFIG}
)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(MP_DOMAIN)) == 1
async def test_tv_load_and_unload(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test loading and unloading TV entry."""
config_entry = MockConfigEntry(
domain=DOMAIN, data=MOCK_USER_VALID_TV_CONFIG, unique_id=UNIQUE_ID
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(MP_DOMAIN)) == 1
assert DOMAIN in hass.data
assert await config_entry.async_unload(hass)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(MP_DOMAIN)) == 0
assert DOMAIN not in hass.data
async def test_speaker_load_and_unload(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test loading and unloading speaker entry."""
config_entry = MockConfigEntry(
domain=DOMAIN, data=MOCK_SPEAKER_CONFIG, unique_id=UNIQUE_ID
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(MP_DOMAIN)) == 1
assert DOMAIN in hass.data
assert await config_entry.async_unload(hass)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(MP_DOMAIN)) == 0
assert DOMAIN not in hass.data
| tboyce021/home-assistant | tests/components/vizio/test_init.py | Python | apache-2.0 | 2,257 |
#!/usr/bin/env python
# standard library imports
import sys
import os
import subprocess
import traceback
import logging
# KBase imports
import biokbase.Transform.script_utils as script_utils
def validate(input_directory, working_directory, level=logging.INFO, logger=None):
"""
Validates any file containing sequence data.
Args:
input_directory: A directory containing one or more SequenceRead files.
working_directory: A directory where any output files produced by validation can be written.
level: Logging level, defaults to logging.INFO.
Returns:
Currently writes to stderr with a Java Exception trace on error, otherwise no output.
Authors:
Srividya Ramikrishnan, Matt Henderson
"""
if logger is None:
logger = script_utils.stderrlogger(__file__)
fasta_extensions = [".fa",".fasta",".fna"]
fastq_extensions = [".fq",".fastq",".fnq"]
extensions = fasta_extensions + fastq_extensions
checked = False
validated = True
for input_file_name in os.listdir(input_directory):
logger.info("Checking for SequenceReads file : {0}".format(input_file_name))
filePath = os.path.join(os.path.abspath(input_directory), input_file_name)
if not os.path.isfile(filePath):
logger.warning("Skipping directory {0}".format(input_file_name))
continue
elif os.path.splitext(input_file_name)[-1] not in extensions:
logger.warning("Unrecognized file type, skipping.")
continue
logger.info("Starting SequenceReads validation of {0}".format(input_file_name))
if os.path.splitext(input_file_name)[-1] in fasta_extensions:
# TODO This needs to be changed, this is really just a demo program for this library and not a serious tool
java_classpath = os.path.join(os.environ.get("KB_TOP"), "lib/jars/FastaValidator/FastaValidator-1.0.jar")
arguments = ["java", "-classpath", java_classpath, "FVTester", filePath]
elif os.path.splitext(input_file_name)[-1] in fastq_extensions:
line_count = int(subprocess.check_output(["wc", "-l", filePath]).split()[0])
if line_count % 4 > 0:
logger.error("Validation failed on {0}, line count is not a multiple of 4!".format(input_file_name) +
" Often this is due to a new line character at the end of the file.")
validated = False
break
arguments = ["fastQValidator", "--file", filePath, "--maxErrors", "10"]
tool_process = subprocess.Popen(arguments, stderr=subprocess.PIPE)
stdout, stderr = tool_process.communicate()
if tool_process.returncode != 0:
logger.error("Validation failed on {0}".format(input_file_name))
validated = False
break
else:
logger.info("Validation passed on {0}".format(input_file_name))
checked = True
if not validated:
raise Exception("Validation failed!")
elif not checked:
raise Exception("No files were found that had a valid fasta or fastq extension.")
else:
logger.info("Validation passed.")
if __name__ == "__main__":
script_details = script_utils.parse_docs(validate.__doc__)
import argparse
parser = argparse.ArgumentParser(prog=__file__,
description=script_details["Description"],
epilog=script_details["Authors"])
parser.add_argument("--input_directory", help=script_details["Args"]["input_directory"], type=str, nargs="?", required=True)
parser.add_argument("--working_directory", help=script_details["Args"]["working_directory"], type=str, nargs="?", required=True)
args, unknown = parser.parse_known_args()
logger = script_utils.stderrlogger(__file__)
try:
validate(input_directory = args.input_directory,
working_directory = args.working_directory,
level = logging.DEBUG,
logger = logger)
except Exception, e:
logger.exception(e)
sys.exit(1)
sys.exit(0)
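# Command-line sketch (directory paths are illustrative):
#
#     python trns_validate_Sequence.py --input_directory ./reads \
#         --working_directory ./work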
| aekazakov/transform | plugins/scripts/validate/trns_validate_Sequence.py | Python | mit | 4,346 |
# songgraph.py (mdb.songgraph)
# Copyright (C) 2014-2017 Timothy Woodford. All rights reserved.
# Create song-related graph structures
from datetime import timedelta
import random
import networkx as nx
def make_play_graph(sdb, grtype=nx.DiGraph):
""" Read the play times from sdb and return a NetworkX graph structure with nodes representing songs and
edges representing sequential plays.
"""
gr = grtype()
cur = sdb.cursor()
    # We don't need timezone awareness here - we only care whether songs were
    # played close together
cur.execute("SELECT song, unixtime FROM plays ORDER BY unixtime ASC")
prev = None
for row in cur.fetchall():
if prev:
if (row[1] - prev[1]) < 12*60: # Find difference in timestamps
nd1 = int(prev[0])
nd2 = int(row[0])
try:
gr[nd1][nd2]["weight"] /= 2
except KeyError:
gr.add_edge(nd1, nd2, weight=16)
prev = row
return gr
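# Usage sketch (assumes `sdb` is an open DB-API connection exposing the "plays"
# table described above; the starting song id is illustrative):
#
#     gr = make_play_graph(sdb)
#     playlist = graph_walk_dual(1234, gr, max_len=20)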
# TODO this is inherently sub-optimal - need a better way to find a route
def possible_routes(start_key, graph, maxdepth, _visited=None):
""" Function to find all possible routes that we could take along the given graph,
starting at the song given by start_key, up to a certain maximum number of songs
in list. Note: The _visited parameter is for internal use only. This is a
recursive method, which places an upper limit on maxdepth.
"""
if _visited is None:
_visited = list()
if maxdepth == 1:
ret = [[song] for song in graph.successors(start_key) if not song in _visited]
else:
_visited.append(start_key)
ret = list()
for song in graph.successors(start_key):
if not song in _visited:
ret.extend([[start_key] + route for route in possible_routes(song, graph, maxdepth - 1, _visited)])
return ret
class _emptydict(object):
def __init__(self, default_value=1):
self.val = default_value
def __getitem__(self, index):
return self.val
def graph_walk(start_key, graph, song_weight=_emptydict(), max_depth=15, iter_depth=3, mean_selection_index=5):
sequence = [start_key]
last = start_key
lmbda=1/mean_selection_index
while len(sequence) < max_depth:
options = possible_routes(last, graph, iter_depth, _visited=list(sequence))
def _lstdens(lst):
return sum([song_weight[key] for key in lst])
options.sort(key=_lstdens, reverse=True)
if len(options)==0:
break
# Bias selection towards things that are earlier in list
choice = options[min(round(random.expovariate(lmbda)), len(options)-1)]
if len(choice)==0:
break
sequence.append(choice[1])
last = sequence[-1]
return sequence
def graph_walk_dual(start_key, graph, song_weight=_emptydict(0.05), max_len=30):
"""Like graph_walk_maxocc, but tries to go in reverse as well as forward along the graph.
Except if song_weight is set, in which case weights are adjusted by the song weights."""
if start_key not in graph: return [start_key]
seq = [start_key]
while len(seq) < max_len:
end = seq[-1]
start = seq[0]
sweigh = lambda x: song_weight[x] if song_weight[x] > 0 else 0.000000001
cand_end = [x for x in graph.successors(end) if x not in seq]
end_possible = [(x, graph[end][x]["weight"]/sweigh(x)) for x in cand_end]
end_possible.sort(key=lambda x: x[1], reverse=False)
cand_begin = [x for x in graph.predecessors(start) if x not in seq]
begin_possible = [(x, graph[x][start]["weight"]/sweigh(x)) for x in cand_begin]
begin_possible.sort(key=lambda x: x[1], reverse=False)
if len(end_possible) > 0:
if len(begin_possible) > 0:
# Both have at least 1 item
if end_possible[0][1] > begin_possible[0][1]:
# Append to end - end is better
seq.append(end_possible[0][0])
else:
# Insert at beginning - beginning is better
seq.insert(0, begin_possible[0][0])
else:
# Have end possibility, but no beginning possibility
seq.append(end_possible[0][0])
elif len(begin_possible) > 0:
# Have beginning possibility, but no end
seq.insert(0, begin_possible[0][0])
else:
# No possibilities at all :(
break
return seq
def graph_walk_maxocc(start_key, graph, song_weight=_emptydict(), max_depth=15):
"Walk the graph by always taking the song that has most frequently been played after the current song"
sequence = [start_key]
last = start_key
while len(sequence) < max_depth:
current = sequence[-1]
next_ = None
for succ in graph.successors(current):
print(succ, graph[current][succ]["weight"])
if next_ is None or graph[current][next_]["weight"] > graph[current][succ]["weight"]:
next_ = succ
sequence.append(next_)
break
return sequence
| twoodford/mdb | mdb/songgraph.py | Python | gpl-3.0 | 5,207 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import string
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import translation
class RandomString(resource.Resource):
"""A resource which generates a random string.
    This is useful for configuring passwords and secrets on services. The
    random string can be generated from explicit character sequences, in which
    case every character is chosen at random from those sequences, or from
    character classes, e.g. lettersdigits, in which case every character is
    chosen at random from the union of ASCII letters and digits. The output is
    a randomly generated string of the specified length (or of length 32 if
    the length property is not specified).
"""
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
LENGTH, SEQUENCE, CHARACTER_CLASSES, CHARACTER_SEQUENCES,
SALT,
) = (
'length', 'sequence', 'character_classes', 'character_sequences',
'salt',
)
_CHARACTER_CLASSES_KEYS = (
CHARACTER_CLASSES_CLASS, CHARACTER_CLASSES_MIN,
) = (
'class', 'min',
)
_CHARACTER_SEQUENCES = (
CHARACTER_SEQUENCES_SEQUENCE, CHARACTER_SEQUENCES_MIN,
) = (
'sequence', 'min',
)
ATTRIBUTES = (
VALUE,
) = (
'value',
)
properties_schema = {
LENGTH: properties.Schema(
properties.Schema.INTEGER,
_('Length of the string to generate.'),
default=32,
constraints=[
constraints.Range(1, 512),
]
),
SEQUENCE: properties.Schema(
properties.Schema.STRING,
_('Sequence of characters to build the random string from.'),
constraints=[
constraints.AllowedValues(['lettersdigits', 'letters',
'lowercase', 'uppercase',
'digits', 'hexdigits',
'octdigits']),
],
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % CHARACTER_CLASSES,
version='2014.2'
)
)
),
CHARACTER_CLASSES: properties.Schema(
properties.Schema.LIST,
_('A list of character class and their constraints to generate '
'the random string from.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
CHARACTER_CLASSES_CLASS: properties.Schema(
properties.Schema.STRING,
(_('A character class and its corresponding %(min)s '
'constraint to generate the random string from.')
% {'min': CHARACTER_CLASSES_MIN}),
constraints=[
constraints.AllowedValues(
['lettersdigits', 'letters', 'lowercase',
'uppercase', 'digits', 'hexdigits',
'octdigits']),
],
default='lettersdigits'),
CHARACTER_CLASSES_MIN: properties.Schema(
properties.Schema.INTEGER,
_('The minimum number of characters from this '
'character class that will be in the generated '
'string.'),
default=1,
constraints=[
constraints.Range(1, 512),
]
)
}
)
),
CHARACTER_SEQUENCES: properties.Schema(
properties.Schema.LIST,
_('A list of character sequences and their constraints to '
'generate the random string from.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
CHARACTER_SEQUENCES_SEQUENCE: properties.Schema(
properties.Schema.STRING,
_('A character sequence and its corresponding %(min)s '
'constraint to generate the random string '
'from.') % {'min': CHARACTER_SEQUENCES_MIN},
required=True),
CHARACTER_SEQUENCES_MIN: properties.Schema(
properties.Schema.INTEGER,
_('The minimum number of characters from this '
'sequence that will be in the generated '
'string.'),
default=1,
constraints=[
constraints.Range(1, 512),
]
)
}
)
),
SALT: properties.Schema(
properties.Schema.STRING,
_('Value which can be set or changed on stack update to trigger '
'the resource for replacement with a new random string. The '
'salt value itself is ignored by the random generator.')
),
}
attributes_schema = {
VALUE: attributes.Schema(
_('The random string generated by this resource. This value is '
'also available by referencing the resource.'),
cache_mode=attributes.Schema.CACHE_NONE,
type=attributes.Schema.STRING
),
}
_sequences = {
'lettersdigits': string.ascii_letters + string.digits,
'letters': string.ascii_letters,
'lowercase': string.ascii_lowercase,
'uppercase': string.ascii_uppercase,
'digits': string.digits,
'hexdigits': string.digits + 'ABCDEF',
'octdigits': string.octdigits
}
def translation_rules(self, props):
if props.get(self.SEQUENCE):
return [
translation.TranslationRule(
props,
translation.TranslationRule.ADD,
[self.CHARACTER_CLASSES],
[{self.CHARACTER_CLASSES_CLASS: props.get(
self.SEQUENCE),
self.CHARACTER_CLASSES_MIN: 1}]),
translation.TranslationRule(
props,
translation.TranslationRule.DELETE,
[self.SEQUENCE]
)
]
@staticmethod
def _deprecated_random_string(sequence, length):
rand = random.SystemRandom()
return ''.join(rand.choice(sequence) for x in six.moves.xrange(length))
def _generate_random_string(self, char_sequences, char_classes, length):
random_string = ""
# Add the minimum number of chars from each char sequence & char class
if char_sequences:
for char_seq in char_sequences:
seq = char_seq[self.CHARACTER_SEQUENCES_SEQUENCE]
seq_min = char_seq[self.CHARACTER_SEQUENCES_MIN]
for i in six.moves.xrange(seq_min):
random_string += random.choice(seq)
if char_classes:
for char_class in char_classes:
cclass_class = char_class[self.CHARACTER_CLASSES_CLASS]
cclass_seq = self._sequences[cclass_class]
cclass_min = char_class[self.CHARACTER_CLASSES_MIN]
for i in six.moves.xrange(cclass_min):
random_string += random.choice(cclass_seq)
def random_class_char():
cclass_dict = random.choice(char_classes)
cclass_class = cclass_dict[self.CHARACTER_CLASSES_CLASS]
cclass_seq = self._sequences[cclass_class]
return random.choice(cclass_seq)
def random_seq_char():
seq_dict = random.choice(char_sequences)
seq = seq_dict[self.CHARACTER_SEQUENCES_SEQUENCE]
return random.choice(seq)
# Fill up rest with random chars from provided sequences & classes
if char_sequences and char_classes:
weighted_choices = ([True] * len(char_classes) +
[False] * len(char_sequences))
while len(random_string) < length:
if random.choice(weighted_choices):
random_string += random_class_char()
else:
random_string += random_seq_char()
elif char_sequences:
while len(random_string) < length:
random_string += random_seq_char()
else:
while len(random_string) < length:
random_string += random_class_char()
# Randomize string
random_string = ''.join(random.sample(random_string,
len(random_string)))
return random_string
def validate(self):
super(RandomString, self).validate()
sequence = self.properties[self.SEQUENCE]
char_sequences = self.properties[self.CHARACTER_SEQUENCES]
char_classes = self.properties[self.CHARACTER_CLASSES]
if sequence and (char_sequences or char_classes):
msg = (_("Cannot use deprecated '%(seq)s' property along with "
"'%(char_seqs)s' or '%(char_classes)s' properties")
% {'seq': self.SEQUENCE,
'char_seqs': self.CHARACTER_SEQUENCES,
'char_classes': self.CHARACTER_CLASSES})
raise exception.StackValidationFailed(message=msg)
def char_min(char_dicts, min_prop):
if char_dicts:
return sum(char_dict[min_prop] for char_dict in char_dicts)
return 0
length = self.properties[self.LENGTH]
min_length = (char_min(char_sequences, self.CHARACTER_SEQUENCES_MIN) +
char_min(char_classes, self.CHARACTER_CLASSES_MIN))
if min_length > length:
msg = _("Length property cannot be smaller than combined "
"character class and character sequence minimums")
raise exception.StackValidationFailed(message=msg)
def handle_create(self):
char_sequences = self.properties[self.CHARACTER_SEQUENCES]
char_classes = self.properties[self.CHARACTER_CLASSES]
length = self.properties[self.LENGTH]
if char_sequences or char_classes:
random_string = self._generate_random_string(char_sequences,
char_classes,
length)
else:
sequence = self.properties[self.SEQUENCE]
if not sequence: # Deprecated property not provided, use a default
sequence = "lettersdigits"
char_seq = self._sequences[sequence]
random_string = self._deprecated_random_string(char_seq, length)
self.data_set('value', random_string, redact=True)
self.resource_id_set(self.physical_resource_name())
def _resolve_attribute(self, name):
if name == self.VALUE:
return self.data().get(self.VALUE)
def get_reference_id(self):
if self.resource_id is not None:
return self.data().get('value')
else:
return six.text_type(self.name)
def resource_mapping():
return {
'OS::Heat::RandomString': RandomString,
}
| dims/heat | heat/engine/resources/openstack/heat/random_string.py | Python | apache-2.0 | 12,500 |
from base import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0$ip1fb5xtq%a=)-k_4r^(#jn0t^@+*^kihkxkozg-mip7+w3+'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'sigi', # will be actually used as "test_sigi" by pytest-django
'USER': 'sigi',
'PASSWORD': 'sigi',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Validate arguments in django-dynamic-fixture
# http://django-dynamic-fixture.readthedocs.org/en/latest/more.html?highlight=ddf_validate_args#validate-arguments-new-in-1-5-0
DDF_VALIDATE_ARGS = True
DDF_DEBUG_MODE = True
DDF_DEFAULT_DATA_FIXTURE = 'sigi.testutils.SigiDataFixture'
| interlegis/sigi | sigi/settings/test.py | Python | gpl-2.0 | 723 |
from Controller.CommandController import CommandController
from Domain.Discipline import Discipline
@CommandController.addCommand(
name="addDiscipline",
help="Adds a Discipline to the database"
)
def addDiscipline(studentCatalogController, disciplineId: int, disciplineName: str):
studentCatalogController.addDiscipline(Discipline(disciplineId, disciplineName))
return 1
@CommandController.addCommand(
name="updateDiscipline",
help="Updates the name of a Discipline"
)
def updateDiscipline(studentCatalogController, disciplineId: int, updatedDisciplineName: str):
studentCatalogController.updateDiscipline(disciplineId, updatedDisciplineName)
return 1
@CommandController.addCommand(
name="removeDiscipline",
help="Removes Discipline with ID"
)
def removeDiscipline(studentCatalogController, disciplineId: int):
studentCatalogController.removeDisciplineById(disciplineId)
return 1 | Zephyrrus/ubb | YEAR 1/SEM1/FP/LAB/l6-l9/Commands/DisciplineCommands.py | Python | mit | 930 |
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import NumericProperty
from kivy.properties import ObjectProperty
from kivy.uix.boxlayout import BoxLayout
Builder.load_string('''
#:import light plyer.light
<LightInterface>:
light: light
orientation: 'vertical'
padding: '50dp'
spacing: '50dp'
BoxLayout:
orientation: 'horizontal'
size_hint_y: 0.3
Button:
id: button_enable
text: 'Enable'
disabled: False
on_release:
root.enable()
button_disable.disabled = not button_disable.disabled
button_enable.disabled = not button_enable.disabled
Button:
id: button_disable
text: 'Disable'
disabled: True
on_release:
root.disable()
button_disable.disabled = not button_disable.disabled
button_enable.disabled = not button_enable.disabled
Label:
text: 'Current illumination:' + str(root.illumination) + ' lx.'
''')
class LightInterface(BoxLayout):
'''Root Widget.'''
light = ObjectProperty()
illumination = NumericProperty()
def enable(self):
self.light.enable()
Clock.schedule_interval(self.get_illumination, 1 / 20.)
def disable(self):
self.light.disable()
Clock.unschedule(self.get_illumination)
def get_illumination(self, dt):
self.illumination = self.light.illumination or self.illumination
class LightApp(App):
def build(self):
return LightInterface()
def on_pause(self):
return True
if __name__ == '__main__':
LightApp().run()
| kived/plyer | examples/light/main.py | Python | mit | 1,744 |
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansCuneiform-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0173) #glyph00371
glyphs.append(0x030A) #glyph00778
glyphs.append(0x030B) #glyph00779
glyphs.append(0x02EB) #glyph00747
glyphs.append(0x024D) #glyph00589
glyphs.append(0x0001) #uniFEFF
glyphs.append(0x02E8) #glyph00744
glyphs.append(0x02E7) #glyph00743
glyphs.append(0x02E6) #glyph00742
glyphs.append(0x01AC) #glyph00428
glyphs.append(0x01AD) #glyph00429
glyphs.append(0x01AA) #glyph00426
glyphs.append(0x01AB) #glyph00427
glyphs.append(0x01A8) #glyph00424
glyphs.append(0x01A9) #glyph00425
glyphs.append(0x01A6) #glyph00422
glyphs.append(0x01A7) #glyph00423
glyphs.append(0x01A4) #glyph00420
glyphs.append(0x01A5) #glyph00421
glyphs.append(0x0302) #glyph00770
glyphs.append(0x0303) #glyph00771
glyphs.append(0x0183) #glyph00387
glyphs.append(0x0182) #glyph00386
glyphs.append(0x0181) #glyph00385
glyphs.append(0x0180) #glyph00384
glyphs.append(0x017F) #glyph00383
glyphs.append(0x017E) #glyph00382
glyphs.append(0x0044) #glyph00068
glyphs.append(0x0045) #glyph00069
glyphs.append(0x0042) #glyph00066
glyphs.append(0x0043) #glyph00067
glyphs.append(0x0040) #glyph00064
glyphs.append(0x0041) #glyph00065
glyphs.append(0x003E) #glyph00062
glyphs.append(0x003F) #glyph00063
glyphs.append(0x003C) #glyph00060
glyphs.append(0x003D) #glyph00061
glyphs.append(0x008D) #glyph00141
glyphs.append(0x008C) #glyph00140
glyphs.append(0x008F) #glyph00143
glyphs.append(0x008E) #glyph00142
glyphs.append(0x0091) #glyph00145
glyphs.append(0x0090) #glyph00144
glyphs.append(0x0093) #glyph00147
glyphs.append(0x0092) #glyph00146
glyphs.append(0x0095) #glyph00149
glyphs.append(0x0094) #glyph00148
glyphs.append(0x0277) #glyph00631
glyphs.append(0x0276) #glyph00630
glyphs.append(0x027D) #glyph00637
glyphs.append(0x027C) #glyph00636
glyphs.append(0x027B) #glyph00635
glyphs.append(0x027A) #glyph00634
glyphs.append(0x02DC) #glyph00732
glyphs.append(0x02DD) #glyph00733
glyphs.append(0x02DA) #glyph00730
glyphs.append(0x02DB) #glyph00731
glyphs.append(0x02E0) #glyph00736
glyphs.append(0x02E1) #glyph00737
glyphs.append(0x02DE) #glyph00734
glyphs.append(0x02DF) #glyph00735
glyphs.append(0x02E2) #glyph00738
glyphs.append(0x02E3) #glyph00739
glyphs.append(0x02E9) #glyph00745
glyphs.append(0x02E5) #glyph00741
glyphs.append(0x02E4) #glyph00740
glyphs.append(0x03CB) #glyph00971
glyphs.append(0x0314) #glyph00788
glyphs.append(0x0374) #glyph00884
glyphs.append(0x0375) #glyph00885
glyphs.append(0x0376) #glyph00886
glyphs.append(0x0377) #glyph00887
glyphs.append(0x0370) #glyph00880
glyphs.append(0x0371) #glyph00881
glyphs.append(0x0372) #glyph00882
glyphs.append(0x0373) #glyph00883
glyphs.append(0x039F) #glyph00927
glyphs.append(0x039E) #glyph00926
glyphs.append(0x039D) #glyph00925
glyphs.append(0x039C) #glyph00924
glyphs.append(0x0378) #glyph00888
glyphs.append(0x0379) #glyph00889
glyphs.append(0x0399) #glyph00921
glyphs.append(0x0398) #glyph00920
glyphs.append(0x02ED) #glyph00749
glyphs.append(0x03CA) #glyph00970
glyphs.append(0x02EC) #glyph00748
glyphs.append(0x03D3) #glyph00979
glyphs.append(0x0032) #glyph00050
glyphs.append(0x0239) #glyph00569
glyphs.append(0x0238) #glyph00568
glyphs.append(0x0237) #glyph00567
glyphs.append(0x0236) #glyph00566
glyphs.append(0x0235) #glyph00565
glyphs.append(0x0234) #glyph00564
glyphs.append(0x0233) #glyph00563
glyphs.append(0x0232) #glyph00562
glyphs.append(0x0231) #glyph00561
glyphs.append(0x0230) #glyph00560
glyphs.append(0x00CE) #glyph00206
glyphs.append(0x00CF) #glyph00207
glyphs.append(0x00CC) #glyph00204
glyphs.append(0x00CD) #glyph00205
glyphs.append(0x00CA) #glyph00202
glyphs.append(0x00CB) #glyph00203
glyphs.append(0x00C8) #glyph00200
glyphs.append(0x00C9) #glyph00201
glyphs.append(0x0169) #glyph00361
glyphs.append(0x0168) #glyph00360
glyphs.append(0x016B) #glyph00363
glyphs.append(0x016A) #glyph00362
glyphs.append(0x016D) #glyph00365
glyphs.append(0x016C) #glyph00364
glyphs.append(0x00D0) #glyph00208
glyphs.append(0x00D1) #glyph00209
glyphs.append(0x032D) #glyph00813
glyphs.append(0x032C) #glyph00812
glyphs.append(0x032B) #glyph00811
glyphs.append(0x032A) #glyph00810
glyphs.append(0x0331) #glyph00817
glyphs.append(0x0330) #glyph00816
glyphs.append(0x032F) #glyph00815
glyphs.append(0x032E) #glyph00814
glyphs.append(0x03B8) #glyph00952
glyphs.append(0x03B9) #glyph00953
glyphs.append(0x0333) #glyph00819
glyphs.append(0x0332) #glyph00818
glyphs.append(0x03BC) #glyph00956
glyphs.append(0x03BD) #glyph00957
glyphs.append(0x03BA) #glyph00954
glyphs.append(0x021C) #glyph00540
glyphs.append(0x02BF) #glyph00703
glyphs.append(0x037C) #glyph00892
glyphs.append(0x0206) #glyph00518
glyphs.append(0x0207) #glyph00519
glyphs.append(0x0388) #glyph00904
glyphs.append(0x0200) #glyph00512
glyphs.append(0x0201) #glyph00513
glyphs.append(0x01FE) #glyph00510
glyphs.append(0x01FF) #glyph00511
glyphs.append(0x0204) #glyph00516
glyphs.append(0x0205) #glyph00517
glyphs.append(0x0202) #glyph00514
glyphs.append(0x0203) #glyph00515
glyphs.append(0x037E) #glyph00894
glyphs.append(0x006D) #glyph00109
glyphs.append(0x006C) #glyph00108
glyphs.append(0x0069) #glyph00105
glyphs.append(0x0068) #glyph00104
glyphs.append(0x006B) #glyph00107
glyphs.append(0x006A) #glyph00106
glyphs.append(0x0065) #glyph00101
glyphs.append(0x0064) #glyph00100
glyphs.append(0x0067) #glyph00103
glyphs.append(0x0066) #glyph00102
glyphs.append(0x02A5) #glyph00677
glyphs.append(0x02A4) #glyph00676
glyphs.append(0x02A3) #glyph00675
glyphs.append(0x02A2) #glyph00674
glyphs.append(0x02A1) #glyph00673
glyphs.append(0x02A0) #glyph00672
glyphs.append(0x029F) #glyph00671
glyphs.append(0x029E) #glyph00670
glyphs.append(0x0308) #glyph00776
glyphs.append(0x0309) #glyph00777
glyphs.append(0x0306) #glyph00774
glyphs.append(0x0307) #glyph00775
glyphs.append(0x0304) #glyph00772
glyphs.append(0x0305) #glyph00773
glyphs.append(0x02A7) #glyph00679
glyphs.append(0x02A6) #glyph00678
glyphs.append(0x01D7) #glyph00471
glyphs.append(0x01D6) #glyph00470
glyphs.append(0x01D9) #glyph00473
glyphs.append(0x01D8) #glyph00472
glyphs.append(0x01DB) #glyph00475
glyphs.append(0x01DA) #glyph00474
glyphs.append(0x01DD) #glyph00477
glyphs.append(0x01DC) #glyph00476
glyphs.append(0x01DF) #glyph00479
glyphs.append(0x01DE) #glyph00478
glyphs.append(0x03A1) #glyph00929
glyphs.append(0x0355) #glyph00853
glyphs.append(0x001F) #glyph00031
glyphs.append(0x001E) #glyph00030
glyphs.append(0x0021) #glyph00033
glyphs.append(0x0020) #glyph00032
glyphs.append(0x0023) #glyph00035
glyphs.append(0x0022) #glyph00034
glyphs.append(0x0025) #glyph00037
glyphs.append(0x0024) #glyph00036
glyphs.append(0x0027) #glyph00039
glyphs.append(0x0026) #glyph00038
glyphs.append(0x0356) #glyph00854
glyphs.append(0x0354) #glyph00852
glyphs.append(0x025A) #glyph00602
glyphs.append(0x025B) #glyph00603
glyphs.append(0x0258) #glyph00600
glyphs.append(0x0259) #glyph00601
glyphs.append(0x025E) #glyph00606
glyphs.append(0x017D) #glyph00381
glyphs.append(0x025C) #glyph00604
glyphs.append(0x025D) #glyph00605
glyphs.append(0x0260) #glyph00608
glyphs.append(0x017C) #glyph00380
glyphs.append(0x0244) #glyph00580
glyphs.append(0x0353) #glyph00851
glyphs.append(0x031F) #glyph00799
glyphs.append(0x02F6) #glyph00758
glyphs.append(0x0225) #glyph00549
glyphs.append(0x0185) #glyph00389
glyphs.append(0x0352) #glyph00850
glyphs.append(0x0184) #glyph00388
glyphs.append(0x03B4) #glyph00948
glyphs.append(0x025F) #glyph00607
glyphs.append(0x0145) #glyph00325
glyphs.append(0x0144) #glyph00324
glyphs.append(0x0147) #glyph00327
glyphs.append(0x0146) #glyph00326
glyphs.append(0x0141) #glyph00321
glyphs.append(0x0140) #glyph00320
glyphs.append(0x0143) #glyph00323
glyphs.append(0x0142) #glyph00322
glyphs.append(0x0054) #glyph00084
glyphs.append(0x0055) #glyph00085
glyphs.append(0x0056) #glyph00086
glyphs.append(0x0057) #glyph00087
glyphs.append(0x0149) #glyph00329
glyphs.append(0x0148) #glyph00328
glyphs.append(0x0052) #glyph00082
glyphs.append(0x0053) #glyph00083
glyphs.append(0x02BB) #glyph00699
glyphs.append(0x02BA) #glyph00698
glyphs.append(0x0392) #glyph00914
glyphs.append(0x027E) #glyph00638
glyphs.append(0x0390) #glyph00912
glyphs.append(0x0391) #glyph00913
glyphs.append(0x038E) #glyph00910
glyphs.append(0x038F) #glyph00911
glyphs.append(0x02B3) #glyph00691
glyphs.append(0x02B2) #glyph00690
glyphs.append(0x02B5) #glyph00693
glyphs.append(0x02B4) #glyph00692
glyphs.append(0x02B7) #glyph00695
glyphs.append(0x02B6) #glyph00694
glyphs.append(0x02B9) #glyph00697
glyphs.append(0x02B8) #glyph00696
glyphs.append(0x0261) #glyph00609
glyphs.append(0x0279) #glyph00633
glyphs.append(0x0278) #glyph00632
glyphs.append(0x0103) #glyph00259
glyphs.append(0x0102) #glyph00258
glyphs.append(0x022A) #glyph00554
glyphs.append(0x022B) #glyph00555
glyphs.append(0x0228) #glyph00552
glyphs.append(0x0076) #glyph00118
glyphs.append(0x0226) #glyph00550
glyphs.append(0x0227) #glyph00551
glyphs.append(0x00FB) #glyph00251
glyphs.append(0x00FA) #glyph00250
glyphs.append(0x00FD) #glyph00253
glyphs.append(0x00FC) #glyph00252
glyphs.append(0x00FF) #glyph00255
glyphs.append(0x00FE) #glyph00254
glyphs.append(0x0101) #glyph00257
glyphs.append(0x0100) #glyph00256
glyphs.append(0x033C) #glyph00828
glyphs.append(0x033D) #glyph00829
glyphs.append(0x0336) #glyph00822
glyphs.append(0x0337) #glyph00823
glyphs.append(0x0334) #glyph00820
glyphs.append(0x0335) #glyph00821
glyphs.append(0x033A) #glyph00826
glyphs.append(0x033B) #glyph00827
glyphs.append(0x0338) #glyph00824
glyphs.append(0x0339) #glyph00825
glyphs.append(0x021D) #glyph00541
glyphs.append(0x01B7) #glyph00439
glyphs.append(0x003B) #glyph00059
glyphs.append(0x01B3) #glyph00435
glyphs.append(0x01B2) #glyph00434
glyphs.append(0x01B5) #glyph00437
glyphs.append(0x003A) #glyph00058
glyphs.append(0x01AF) #glyph00431
glyphs.append(0x01AE) #glyph00430
glyphs.append(0x01B1) #glyph00433
glyphs.append(0x01B0) #glyph00432
glyphs.append(0x02C5) #glyph00709
glyphs.append(0x02C4) #glyph00708
glyphs.append(0x0394) #glyph00916
glyphs.append(0x03B5) #glyph00949
glyphs.append(0x024C) #glyph00588
glyphs.append(0x0350) #glyph00848
glyphs.append(0x004F) #glyph00079
glyphs.append(0x004E) #glyph00078
glyphs.append(0x004B) #glyph00075
glyphs.append(0x004A) #glyph00074
glyphs.append(0x004D) #glyph00077
glyphs.append(0x004C) #glyph00076
glyphs.append(0x0047) #glyph00071
glyphs.append(0x0046) #glyph00070
glyphs.append(0x0049) #glyph00073
glyphs.append(0x0048) #glyph00072
glyphs.append(0x00AE) #glyph00174
glyphs.append(0x00AF) #glyph00175
glyphs.append(0x00B0) #glyph00176
glyphs.append(0x00B1) #glyph00177
glyphs.append(0x00AA) #glyph00170
glyphs.append(0x00AB) #glyph00171
glyphs.append(0x00AC) #glyph00172
glyphs.append(0x00AD) #glyph00173
glyphs.append(0x0286) #glyph00646
glyphs.append(0x0287) #glyph00647
glyphs.append(0x0284) #glyph00644
glyphs.append(0x0285) #glyph00645
glyphs.append(0x00B2) #glyph00178
glyphs.append(0x00B3) #glyph00179
glyphs.append(0x0280) #glyph00640
glyphs.append(0x0281) #glyph00641
glyphs.append(0x01E8) #glyph00488
glyphs.append(0x01E9) #glyph00489
glyphs.append(0x02D3) #glyph00723
glyphs.append(0x02D2) #glyph00722
glyphs.append(0x02D5) #glyph00725
glyphs.append(0x02D4) #glyph00724
glyphs.append(0x02D7) #glyph00727
glyphs.append(0x02D6) #glyph00726
glyphs.append(0x01E0) #glyph00480
glyphs.append(0x01E1) #glyph00481
glyphs.append(0x01E2) #glyph00482
glyphs.append(0x01E3) #glyph00483
glyphs.append(0x01E4) #glyph00484
glyphs.append(0x01E5) #glyph00485
glyphs.append(0x01E6) #glyph00486
glyphs.append(0x01E7) #glyph00487
glyphs.append(0x0120) #glyph00288
glyphs.append(0x0121) #glyph00289
glyphs.append(0x011E) #glyph00286
glyphs.append(0x011F) #glyph00287
glyphs.append(0x011C) #glyph00284
glyphs.append(0x011D) #glyph00285
glyphs.append(0x011A) #glyph00282
glyphs.append(0x011B) #glyph00283
glyphs.append(0x0118) #glyph00280
glyphs.append(0x0119) #glyph00281
glyphs.append(0x0383) #glyph00899
glyphs.append(0x0382) #glyph00898
glyphs.append(0x022C) #glyph00556
glyphs.append(0x037D) #glyph00893
glyphs.append(0x0166) #glyph00358
glyphs.append(0x037B) #glyph00891
glyphs.append(0x037A) #glyph00890
glyphs.append(0x0381) #glyph00897
glyphs.append(0x0380) #glyph00896
glyphs.append(0x037F) #glyph00895
glyphs.append(0x0167) #glyph00359
glyphs.append(0x02F0) #glyph00752
glyphs.append(0x02F1) #glyph00753
glyphs.append(0x031E) #glyph00798
glyphs.append(0x0000) #.notdef
glyphs.append(0x031A) #glyph00794
glyphs.append(0x031B) #glyph00795
glyphs.append(0x031C) #glyph00796
glyphs.append(0x031D) #glyph00797
glyphs.append(0x0316) #glyph00790
glyphs.append(0x0317) #glyph00791
glyphs.append(0x0318) #glyph00792
glyphs.append(0x0319) #glyph00793
glyphs.append(0x015E) #glyph00350
glyphs.append(0x015F) #glyph00351
glyphs.append(0x0250) #glyph00592
glyphs.append(0x0251) #glyph00593
glyphs.append(0x024E) #glyph00590
glyphs.append(0x024F) #glyph00591
glyphs.append(0x0254) #glyph00596
glyphs.append(0x0255) #glyph00597
glyphs.append(0x0252) #glyph00594
glyphs.append(0x0253) #glyph00595
glyphs.append(0x0256) #glyph00598
glyphs.append(0x0257) #glyph00599
glyphs.append(0x00D7) #glyph00215
glyphs.append(0x00D6) #glyph00214
glyphs.append(0x00D9) #glyph00217
glyphs.append(0x00D8) #glyph00216
glyphs.append(0x00D3) #glyph00211
glyphs.append(0x00D2) #glyph00210
glyphs.append(0x00D5) #glyph00213
glyphs.append(0x00D4) #glyph00212
glyphs.append(0x0162) #glyph00354
glyphs.append(0x0163) #glyph00355
glyphs.append(0x0164) #glyph00356
glyphs.append(0x0165) #glyph00357
glyphs.append(0x00DB) #glyph00219
glyphs.append(0x00DA) #glyph00218
glyphs.append(0x0160) #glyph00352
glyphs.append(0x0161) #glyph00353
glyphs.append(0x0362) #glyph00866
glyphs.append(0x0363) #glyph00867
glyphs.append(0x0360) #glyph00864
glyphs.append(0x0361) #glyph00865
glyphs.append(0x035E) #glyph00862
glyphs.append(0x035F) #glyph00863
glyphs.append(0x035C) #glyph00860
glyphs.append(0x035D) #glyph00861
glyphs.append(0x03AD) #glyph00941
glyphs.append(0x03AC) #glyph00940
glyphs.append(0x03AF) #glyph00943
glyphs.append(0x03AE) #glyph00942
glyphs.append(0x03B1) #glyph00945
glyphs.append(0x03B0) #glyph00944
glyphs.append(0x0364) #glyph00868
glyphs.append(0x0365) #glyph00869
glyphs.append(0x0058) #glyph00088
glyphs.append(0x0059) #glyph00089
glyphs.append(0x01FD) #glyph00509
glyphs.append(0x01FC) #glyph00508
glyphs.append(0x035B) #glyph00859
glyphs.append(0x01F5) #glyph00501
glyphs.append(0x01F4) #glyph00500
glyphs.append(0x01F7) #glyph00503
glyphs.append(0x01F6) #glyph00502
glyphs.append(0x01F9) #glyph00505
glyphs.append(0x01F8) #glyph00504
glyphs.append(0x01FB) #glyph00507
glyphs.append(0x01FA) #glyph00506
glyphs.append(0x0359) #glyph00857
glyphs.append(0x0193) #glyph00403
glyphs.append(0x03B3) #glyph00947
glyphs.append(0x0358) #glyph00856
glyphs.append(0x03B2) #glyph00946
glyphs.append(0x0357) #glyph00855
glyphs.append(0x0082) #glyph00130
glyphs.append(0x0083) #glyph00131
glyphs.append(0x0084) #glyph00132
glyphs.append(0x0085) #glyph00133
glyphs.append(0x0086) #glyph00134
glyphs.append(0x0087) #glyph00135
glyphs.append(0x0088) #glyph00136
glyphs.append(0x0089) #glyph00137
glyphs.append(0x008A) #glyph00138
glyphs.append(0x008B) #glyph00139
glyphs.append(0x0051) #glyph00081
glyphs.append(0x0349) #glyph00841
glyphs.append(0x0301) #glyph00769
glyphs.append(0x0300) #glyph00768
glyphs.append(0x02FD) #glyph00765
glyphs.append(0x02FC) #glyph00764
glyphs.append(0x02FF) #glyph00767
glyphs.append(0x02FE) #glyph00766
glyphs.append(0x02F9) #glyph00761
glyphs.append(0x02F8) #glyph00760
glyphs.append(0x02FB) #glyph00763
glyphs.append(0x02FA) #glyph00762
glyphs.append(0x01BC) #glyph00444
glyphs.append(0x01BD) #glyph00445
glyphs.append(0x01BE) #glyph00446
glyphs.append(0x01BF) #glyph00447
glyphs.append(0x01B8) #glyph00440
glyphs.append(0x01B9) #glyph00441
glyphs.append(0x01BA) #glyph00442
glyphs.append(0x01BB) #glyph00443
glyphs.append(0x01C0) #glyph00448
glyphs.append(0x01C1) #glyph00449
glyphs.append(0x034E) #glyph00846
glyphs.append(0x0393) #glyph00915
glyphs.append(0x0004) #glyph00004
glyphs.append(0x0005) #glyph00005
glyphs.append(0x0006) #glyph00006
glyphs.append(0x0007) #glyph00007
glyphs.append(0x0008) #glyph00008
glyphs.append(0x0009) #glyph00009
glyphs.append(0x0263) #glyph00611
glyphs.append(0x0262) #glyph00610
glyphs.append(0x0265) #glyph00613
glyphs.append(0x0264) #glyph00612
glyphs.append(0x0267) #glyph00615
glyphs.append(0x0266) #glyph00614
glyphs.append(0x0269) #glyph00617
glyphs.append(0x0268) #glyph00616
glyphs.append(0x026B) #glyph00619
glyphs.append(0x026A) #glyph00618
glyphs.append(0x03A0) #glyph00928
glyphs.append(0x0396) #glyph00918
glyphs.append(0x0397) #glyph00919
glyphs.append(0x039B) #glyph00923
glyphs.append(0x0395) #glyph00917
glyphs.append(0x0188) #glyph00392
glyphs.append(0x039A) #glyph00922
glyphs.append(0x013E) #glyph00318
glyphs.append(0x013F) #glyph00319
glyphs.append(0x0189) #glyph00393
glyphs.append(0x0386) #glyph00902
glyphs.append(0x0136) #glyph00310
glyphs.append(0x0137) #glyph00311
glyphs.append(0x0138) #glyph00312
glyphs.append(0x0139) #glyph00313
glyphs.append(0x013A) #glyph00314
glyphs.append(0x013B) #glyph00315
glyphs.append(0x013C) #glyph00316
glyphs.append(0x013D) #glyph00317
glyphs.append(0x005D) #glyph00093
glyphs.append(0x005C) #glyph00092
glyphs.append(0x005B) #glyph00091
glyphs.append(0x005A) #glyph00090
glyphs.append(0x0061) #glyph00097
glyphs.append(0x0060) #glyph00096
glyphs.append(0x005F) #glyph00095
glyphs.append(0x005E) #glyph00094
glyphs.append(0x0063) #glyph00099
glyphs.append(0x0062) #glyph00098
glyphs.append(0x0389) #glyph00905
glyphs.append(0x018E) #glyph00398
glyphs.append(0x038B) #glyph00907
glyphs.append(0x038A) #glyph00906
glyphs.append(0x0385) #glyph00901
glyphs.append(0x0384) #glyph00900
glyphs.append(0x0387) #glyph00903
glyphs.append(0x018F) #glyph00399
glyphs.append(0x038D) #glyph00909
glyphs.append(0x038C) #glyph00908
glyphs.append(0x034F) #glyph00847
glyphs.append(0x0221) #glyph00545
glyphs.append(0x0220) #glyph00544
glyphs.append(0x0223) #glyph00547
glyphs.append(0x0222) #glyph00546
glyphs.append(0x010C) #glyph00268
glyphs.append(0x010D) #glyph00269
glyphs.append(0x021F) #glyph00543
glyphs.append(0x021E) #glyph00542
glyphs.append(0x0108) #glyph00264
glyphs.append(0x0109) #glyph00265
glyphs.append(0x010A) #glyph00266
glyphs.append(0x010B) #glyph00267
glyphs.append(0x0104) #glyph00260
glyphs.append(0x0105) #glyph00261
glyphs.append(0x0106) #glyph00262
glyphs.append(0x0107) #glyph00263
glyphs.append(0x0347) #glyph00839
glyphs.append(0x0346) #glyph00838
glyphs.append(0x033F) #glyph00831
glyphs.append(0x033E) #glyph00830
glyphs.append(0x0341) #glyph00833
glyphs.append(0x0340) #glyph00832
glyphs.append(0x0343) #glyph00835
glyphs.append(0x0342) #glyph00834
glyphs.append(0x0345) #glyph00837
glyphs.append(0x0344) #glyph00836
glyphs.append(0x00A9) #glyph00169
glyphs.append(0x0190) #glyph00400
glyphs.append(0x0191) #glyph00401
glyphs.append(0x0192) #glyph00402
glyphs.append(0x002B) #glyph00043
glyphs.append(0x0194) #glyph00404
glyphs.append(0x0195) #glyph00405
glyphs.append(0x0196) #glyph00406
glyphs.append(0x0197) #glyph00407
glyphs.append(0x0198) #glyph00408
glyphs.append(0x0199) #glyph00409
glyphs.append(0x0030) #glyph00048
glyphs.append(0x0031) #glyph00049
glyphs.append(0x0050) #glyph00080
glyphs.append(0x0028) #glyph00040
glyphs.append(0x0029) #glyph00041
glyphs.append(0x002A) #glyph00042
glyphs.append(0x00A8) #glyph00168
glyphs.append(0x002C) #glyph00044
glyphs.append(0x002D) #glyph00045
glyphs.append(0x002E) #glyph00046
glyphs.append(0x002F) #glyph00047
glyphs.append(0x00A3) #glyph00163
glyphs.append(0x00A2) #glyph00162
glyphs.append(0x00A1) #glyph00161
glyphs.append(0x00A0) #glyph00160
glyphs.append(0x00A7) #glyph00167
glyphs.append(0x00A6) #glyph00166
glyphs.append(0x00A5) #glyph00165
glyphs.append(0x00A4) #glyph00164
glyphs.append(0x0245) #glyph00581
glyphs.append(0x0273) #glyph00627
glyphs.append(0x0293) #glyph00659
glyphs.append(0x0292) #glyph00658
glyphs.append(0x028F) #glyph00655
glyphs.append(0x028E) #glyph00654
glyphs.append(0x0291) #glyph00657
glyphs.append(0x0290) #glyph00656
glyphs.append(0x028B) #glyph00651
glyphs.append(0x028A) #glyph00650
glyphs.append(0x028D) #glyph00653
glyphs.append(0x0229) #glyph00553
glyphs.append(0x02CA) #glyph00714
glyphs.append(0x02CB) #glyph00715
glyphs.append(0x02CC) #glyph00716
glyphs.append(0x02CD) #glyph00717
glyphs.append(0x02C6) #glyph00710
glyphs.append(0x02C7) #glyph00711
glyphs.append(0x01F3) #glyph00499
glyphs.append(0x01F2) #glyph00498
glyphs.append(0x01F1) #glyph00497
glyphs.append(0x01F0) #glyph00496
glyphs.append(0x01EF) #glyph00495
glyphs.append(0x01EE) #glyph00494
glyphs.append(0x01ED) #glyph00493
glyphs.append(0x01EC) #glyph00492
glyphs.append(0x01EB) #glyph00491
glyphs.append(0x01EA) #glyph00490
glyphs.append(0x02F7) #glyph00759
glyphs.append(0x012B) #glyph00299
glyphs.append(0x012A) #glyph00298
glyphs.append(0x0127) #glyph00295
glyphs.append(0x0126) #glyph00294
glyphs.append(0x0129) #glyph00297
glyphs.append(0x0128) #glyph00296
glyphs.append(0x0123) #glyph00291
glyphs.append(0x0122) #glyph00290
glyphs.append(0x0125) #glyph00293
glyphs.append(0x0124) #glyph00292
glyphs.append(0x022E) #glyph00558
glyphs.append(0x022F) #glyph00559
glyphs.append(0x03C1) #glyph00961
glyphs.append(0x00C4) #glyph00196
glyphs.append(0x00C5) #glyph00197
glyphs.append(0x00C2) #glyph00194
glyphs.append(0x00C3) #glyph00195
glyphs.append(0x00C0) #glyph00192
glyphs.append(0x00C1) #glyph00193
glyphs.append(0x00BE) #glyph00190
glyphs.append(0x00BF) #glyph00191
glyphs.append(0x03BE) #glyph00958
glyphs.append(0x00C6) #glyph00198
glyphs.append(0x00C7) #glyph00199
glyphs.append(0x030F) #glyph00783
glyphs.append(0x030E) #glyph00782
glyphs.append(0x030D) #glyph00781
glyphs.append(0x030C) #glyph00780
glyphs.append(0x0313) #glyph00787
glyphs.append(0x0312) #glyph00786
glyphs.append(0x0311) #glyph00785
glyphs.append(0x0310) #glyph00784
glyphs.append(0x0315) #glyph00789
glyphs.append(0x028C) #glyph00652
glyphs.append(0x0283) #glyph00643
glyphs.append(0x02C9) #glyph00713
glyphs.append(0x0171) #glyph00369
glyphs.append(0x0170) #glyph00368
glyphs.append(0x0003) #uni00A0
glyphs.append(0x00E4) #glyph00228
glyphs.append(0x00E5) #glyph00229
glyphs.append(0x0247) #glyph00583
glyphs.append(0x0246) #glyph00582
glyphs.append(0x0249) #glyph00585
glyphs.append(0x0248) #glyph00584
glyphs.append(0x024B) #glyph00587
glyphs.append(0x024A) #glyph00586
glyphs.append(0x00DC) #glyph00220
glyphs.append(0x00DD) #glyph00221
glyphs.append(0x00DE) #glyph00222
glyphs.append(0x00DF) #glyph00223
glyphs.append(0x00E0) #glyph00224
glyphs.append(0x00E1) #glyph00225
glyphs.append(0x00E2) #glyph00226
glyphs.append(0x00E3) #glyph00227
glyphs.append(0x0157) #glyph00343
glyphs.append(0x0156) #glyph00342
glyphs.append(0x0155) #glyph00341
glyphs.append(0x0154) #glyph00340
glyphs.append(0x015B) #glyph00347
glyphs.append(0x015A) #glyph00346
glyphs.append(0x0159) #glyph00345
glyphs.append(0x0158) #glyph00344
glyphs.append(0x015D) #glyph00349
glyphs.append(0x015C) #glyph00348
glyphs.append(0x036B) #glyph00875
glyphs.append(0x036A) #glyph00874
glyphs.append(0x036D) #glyph00877
glyphs.append(0x036C) #glyph00876
glyphs.append(0x0367) #glyph00871
glyphs.append(0x0366) #glyph00870
glyphs.append(0x0369) #glyph00873
glyphs.append(0x0368) #glyph00872
glyphs.append(0x03CE) #glyph00974
glyphs.append(0x03CF) #glyph00975
glyphs.append(0x03D0) #glyph00976
glyphs.append(0x03D1) #glyph00977
glyphs.append(0x036F) #glyph00879
glyphs.append(0x036E) #glyph00878
glyphs.append(0x03CC) #glyph00972
glyphs.append(0x03CD) #glyph00973
glyphs.append(0x02CE) #glyph00718
glyphs.append(0x02CF) #glyph00719
glyphs.append(0x016F) #glyph00367
glyphs.append(0x016E) #glyph00366
glyphs.append(0x0208) #glyph00520
glyphs.append(0x021A) #glyph00538
glyphs.append(0x021B) #glyph00539
glyphs.append(0x0216) #glyph00534
glyphs.append(0x0217) #glyph00535
glyphs.append(0x0218) #glyph00536
glyphs.append(0x0219) #glyph00537
glyphs.append(0x0212) #glyph00530
glyphs.append(0x0213) #glyph00531
glyphs.append(0x0214) #glyph00532
glyphs.append(0x0215) #glyph00533
glyphs.append(0x03BF) #glyph00959
glyphs.append(0x0351) #glyph00849
glyphs.append(0x007F) #glyph00127
glyphs.append(0x007E) #glyph00126
glyphs.append(0x007D) #glyph00125
glyphs.append(0x007C) #glyph00124
glyphs.append(0x007B) #glyph00123
glyphs.append(0x007A) #glyph00122
glyphs.append(0x0079) #glyph00121
glyphs.append(0x0078) #glyph00120
glyphs.append(0x0081) #glyph00129
glyphs.append(0x0080) #glyph00128
glyphs.append(0x02EE) #glyph00750
glyphs.append(0x02EF) #glyph00751
glyphs.append(0x01CB) #glyph00459
glyphs.append(0x01CA) #glyph00458
glyphs.append(0x02F2) #glyph00754
glyphs.append(0x02F3) #glyph00755
glyphs.append(0x02F4) #glyph00756
glyphs.append(0x02F5) #glyph00757
glyphs.append(0x01C5) #glyph00453
glyphs.append(0x01C4) #glyph00452
glyphs.append(0x01C3) #glyph00451
glyphs.append(0x01C2) #glyph00450
glyphs.append(0x01C9) #glyph00457
glyphs.append(0x01C8) #glyph00456
glyphs.append(0x01C7) #glyph00455
glyphs.append(0x01C6) #glyph00454
glyphs.append(0x03B7) #glyph00951
glyphs.append(0x02C8) #glyph00712
glyphs.append(0x03BB) #glyph00955
glyphs.append(0x02BE) #glyph00702
glyphs.append(0x01B6) #glyph00438
glyphs.append(0x0186) #glyph00390
glyphs.append(0x0187) #glyph00391
glyphs.append(0x0013) #glyph00019
glyphs.append(0x0012) #glyph00018
glyphs.append(0x018A) #glyph00394
glyphs.append(0x018B) #glyph00395
glyphs.append(0x018C) #glyph00396
glyphs.append(0x018D) #glyph00397
glyphs.append(0x000D) #glyph00013
glyphs.append(0x000C) #glyph00012
glyphs.append(0x000B) #glyph00011
glyphs.append(0x000A) #glyph00010
glyphs.append(0x0011) #glyph00017
glyphs.append(0x0010) #glyph00016
glyphs.append(0x000F) #glyph00015
glyphs.append(0x000E) #glyph00014
glyphs.append(0x0098) #glyph00152
glyphs.append(0x0099) #glyph00153
glyphs.append(0x0096) #glyph00150
glyphs.append(0x0097) #glyph00151
glyphs.append(0x009C) #glyph00156
glyphs.append(0x009D) #glyph00157
glyphs.append(0x009A) #glyph00154
glyphs.append(0x009B) #glyph00155
glyphs.append(0x03D9) #glyph00985
glyphs.append(0x03D8) #glyph00984
glyphs.append(0x009E) #glyph00158
glyphs.append(0x009F) #glyph00159
glyphs.append(0x03D5) #glyph00981
glyphs.append(0x03D4) #glyph00980
glyphs.append(0x03D7) #glyph00983
glyphs.append(0x03D6) #glyph00982
glyphs.append(0x0270) #glyph00624
glyphs.append(0x0271) #glyph00625
glyphs.append(0x0272) #glyph00626
glyphs.append(0x01B4) #glyph00436
glyphs.append(0x026C) #glyph00620
glyphs.append(0x026D) #glyph00621
glyphs.append(0x026E) #glyph00622
glyphs.append(0x026F) #glyph00623
glyphs.append(0x0274) #glyph00628
glyphs.append(0x0275) #glyph00629
glyphs.append(0x03C0) #glyph00960
glyphs.append(0x0135) #glyph00309
glyphs.append(0x0134) #glyph00308
glyphs.append(0x0133) #glyph00307
glyphs.append(0x0132) #glyph00306
glyphs.append(0x0131) #glyph00305
glyphs.append(0x0130) #glyph00304
glyphs.append(0x012F) #glyph00303
glyphs.append(0x012E) #glyph00302
glyphs.append(0x012D) #glyph00301
glyphs.append(0x012C) #glyph00300
glyphs.append(0x03AA) #glyph00938
glyphs.append(0x03AB) #glyph00939
glyphs.append(0x03C8) #glyph00968
glyphs.append(0x03A2) #glyph00930
glyphs.append(0x03A3) #glyph00931
glyphs.append(0x03A4) #glyph00932
glyphs.append(0x03A5) #glyph00933
glyphs.append(0x03A6) #glyph00934
glyphs.append(0x03A7) #glyph00935
glyphs.append(0x03A8) #glyph00936
glyphs.append(0x03A9) #glyph00937
glyphs.append(0x03C9) #glyph00969
glyphs.append(0x02EA) #glyph00746
glyphs.append(0x0242) #glyph00578
glyphs.append(0x0243) #glyph00579
glyphs.append(0x023A) #glyph00570
glyphs.append(0x023B) #glyph00571
glyphs.append(0x023C) #glyph00572
glyphs.append(0x023D) #glyph00573
glyphs.append(0x023E) #glyph00574
glyphs.append(0x023F) #glyph00575
glyphs.append(0x0240) #glyph00576
glyphs.append(0x0241) #glyph00577
glyphs.append(0x0111) #glyph00273
glyphs.append(0x0110) #glyph00272
glyphs.append(0x010F) #glyph00271
glyphs.append(0x010E) #glyph00270
glyphs.append(0x0115) #glyph00277
glyphs.append(0x0114) #glyph00276
glyphs.append(0x0113) #glyph00275
glyphs.append(0x0112) #glyph00274
glyphs.append(0x0174) #glyph00372
glyphs.append(0x0175) #glyph00373
glyphs.append(0x0117) #glyph00279
glyphs.append(0x0116) #glyph00278
glyphs.append(0x0178) #glyph00376
glyphs.append(0x0179) #glyph00377
glyphs.append(0x0176) #glyph00374
glyphs.append(0x0177) #glyph00375
glyphs.append(0x0328) #glyph00808
glyphs.append(0x0329) #glyph00809
glyphs.append(0x0324) #glyph00804
glyphs.append(0x0325) #glyph00805
glyphs.append(0x0326) #glyph00806
glyphs.append(0x0327) #glyph00807
glyphs.append(0x0320) #glyph00800
glyphs.append(0x0321) #glyph00801
glyphs.append(0x0322) #glyph00802
glyphs.append(0x0323) #glyph00803
glyphs.append(0x01A1) #glyph00417
glyphs.append(0x01A0) #glyph00416
glyphs.append(0x019F) #glyph00415
glyphs.append(0x019E) #glyph00414
glyphs.append(0x019D) #glyph00413
glyphs.append(0x019C) #glyph00412
glyphs.append(0x019B) #glyph00411
glyphs.append(0x019A) #glyph00410
glyphs.append(0x01A3) #glyph00419
glyphs.append(0x01A2) #glyph00418
glyphs.append(0x0039) #glyph00057
glyphs.append(0x0038) #glyph00056
glyphs.append(0x0037) #glyph00055
glyphs.append(0x0036) #glyph00054
glyphs.append(0x0035) #glyph00053
glyphs.append(0x0034) #glyph00052
glyphs.append(0x0033) #glyph00051
glyphs.append(0x0077) #glyph00119
glyphs.append(0x0074) #glyph00116
glyphs.append(0x0075) #glyph00117
glyphs.append(0x0072) #glyph00114
glyphs.append(0x0073) #glyph00115
glyphs.append(0x0070) #glyph00112
glyphs.append(0x0071) #glyph00113
glyphs.append(0x006E) #glyph00110
glyphs.append(0x006F) #glyph00111
glyphs.append(0x0294) #glyph00660
glyphs.append(0x0295) #glyph00661
glyphs.append(0x0296) #glyph00662
glyphs.append(0x0297) #glyph00663
glyphs.append(0x0298) #glyph00664
glyphs.append(0x0299) #glyph00665
glyphs.append(0x029A) #glyph00666
glyphs.append(0x029B) #glyph00667
glyphs.append(0x029C) #glyph00668
glyphs.append(0x029D) #glyph00669
glyphs.append(0x02BD) #glyph00701
glyphs.append(0x02BC) #glyph00700
glyphs.append(0x02C3) #glyph00707
glyphs.append(0x02C2) #glyph00706
glyphs.append(0x02C1) #glyph00705
glyphs.append(0x02C0) #glyph00704
glyphs.append(0x01CE) #glyph00462
glyphs.append(0x01CF) #glyph00463
glyphs.append(0x01CC) #glyph00460
glyphs.append(0x01CD) #glyph00461
glyphs.append(0x01D2) #glyph00466
glyphs.append(0x01D3) #glyph00467
glyphs.append(0x01D0) #glyph00464
glyphs.append(0x01D1) #glyph00465
glyphs.append(0x0002) #uni000D
glyphs.append(0x01D4) #glyph00468
glyphs.append(0x01D5) #glyph00469
glyphs.append(0x035A) #glyph00858
glyphs.append(0x0209) #glyph00521
glyphs.append(0x0016) #glyph00022
glyphs.append(0x0017) #glyph00023
glyphs.append(0x0014) #glyph00020
glyphs.append(0x0015) #glyph00021
glyphs.append(0x001A) #glyph00026
glyphs.append(0x001B) #glyph00027
glyphs.append(0x0018) #glyph00024
glyphs.append(0x0019) #glyph00025
glyphs.append(0x001C) #glyph00028
glyphs.append(0x001D) #glyph00029
glyphs.append(0x00B9) #glyph00185
glyphs.append(0x00B8) #glyph00184
glyphs.append(0x00BB) #glyph00187
glyphs.append(0x00BA) #glyph00186
glyphs.append(0x00B5) #glyph00181
glyphs.append(0x00B4) #glyph00180
glyphs.append(0x00B7) #glyph00183
glyphs.append(0x00B6) #glyph00182
glyphs.append(0x00BD) #glyph00189
glyphs.append(0x00BC) #glyph00188
glyphs.append(0x0211) #glyph00529
glyphs.append(0x03B6) #glyph00950
glyphs.append(0x0210) #glyph00528
glyphs.append(0x0288) #glyph00648
glyphs.append(0x0289) #glyph00649
glyphs.append(0x0224) #glyph00548
glyphs.append(0x00EF) #glyph00239
glyphs.append(0x00EE) #glyph00238
glyphs.append(0x00ED) #glyph00237
glyphs.append(0x00EC) #glyph00236
glyphs.append(0x00EB) #glyph00235
glyphs.append(0x00EA) #glyph00234
glyphs.append(0x00E9) #glyph00233
glyphs.append(0x00E8) #glyph00232
glyphs.append(0x00E7) #glyph00231
glyphs.append(0x00E6) #glyph00230
glyphs.append(0x0150) #glyph00336
glyphs.append(0x0151) #glyph00337
glyphs.append(0x014E) #glyph00334
glyphs.append(0x014F) #glyph00335
glyphs.append(0x014C) #glyph00332
glyphs.append(0x014D) #glyph00333
glyphs.append(0x014A) #glyph00330
glyphs.append(0x014B) #glyph00331
glyphs.append(0x0348) #glyph00840
glyphs.append(0x0282) #glyph00642
glyphs.append(0x034A) #glyph00842
glyphs.append(0x034B) #glyph00843
glyphs.append(0x034C) #glyph00844
glyphs.append(0x034D) #glyph00845
glyphs.append(0x0152) #glyph00338
glyphs.append(0x0153) #glyph00339
glyphs.append(0x03C3) #glyph00963
glyphs.append(0x03C2) #glyph00962
glyphs.append(0x02B0) #glyph00688
glyphs.append(0x02B1) #glyph00689
glyphs.append(0x03C7) #glyph00967
glyphs.append(0x03C6) #glyph00966
glyphs.append(0x03C5) #glyph00965
glyphs.append(0x03C4) #glyph00964
glyphs.append(0x02AA) #glyph00682
glyphs.append(0x02AB) #glyph00683
glyphs.append(0x02A8) #glyph00680
glyphs.append(0x02A9) #glyph00681
glyphs.append(0x02AE) #glyph00686
glyphs.append(0x02AF) #glyph00687
glyphs.append(0x02AC) #glyph00684
glyphs.append(0x02AD) #glyph00685
glyphs.append(0x02D1) #glyph00721
glyphs.append(0x02D0) #glyph00720
glyphs.append(0x03D2) #glyph00978
glyphs.append(0x017A) #glyph00378
glyphs.append(0x017B) #glyph00379
glyphs.append(0x027F) #glyph00639
glyphs.append(0x020B) #glyph00523
glyphs.append(0x020A) #glyph00522
glyphs.append(0x00F8) #glyph00248
glyphs.append(0x00F9) #glyph00249
glyphs.append(0x020F) #glyph00527
glyphs.append(0x020E) #glyph00526
glyphs.append(0x020D) #glyph00525
glyphs.append(0x020C) #glyph00524
glyphs.append(0x00F2) #glyph00242
glyphs.append(0x00F3) #glyph00243
glyphs.append(0x00F0) #glyph00240
glyphs.append(0x00F1) #glyph00241
glyphs.append(0x00F6) #glyph00246
glyphs.append(0x00F7) #glyph00247
glyphs.append(0x00F4) #glyph00244
glyphs.append(0x00F5) #glyph00245
glyphs.append(0x02D9) #glyph00729
glyphs.append(0x02D8) #glyph00728
glyphs.append(0x022D) #glyph00557
glyphs.append(0x0172) #glyph00370
return glyphs
| davelab6/pyfontaine | fontaine/charsets/noto_glyphs/notosanscuneiform_regular.py | Python | gpl-3.0 | 42,569 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "unshorten.tests.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| bitmazk/django-unshorten | manage.py | Python | mit | 259 |
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
__author__ = "mworden"
import os
from mi.logging import config
from mi.dataset.driver.moas.gl.dosta.driver_common import DostaAbcdjmGliderDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.core.versioning import version
@version("15.6.0")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
config.add_configuration(os.path.join(basePythonCodePath, 'res', 'config', 'mi-logging.yml'))
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'DostaTelemeteredDataParticle',
}
driver = DostaAbcdjmGliderDriver(sourceFilePath, particleDataHdlrObj, parser_config)
return driver.process()
| JeffRoy/mi-dataset | mi/dataset/driver/moas/gl/dosta/dosta_abcdjm_glider_telemetered_driver.py | Python | bsd-2-clause | 816 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Making a PB & J sandwich."""
from task_01.peanut import BUTTER
JELLY = BUTTER
| slb6968/is210-week-05-warmup | task_03.py | Python | mpl-2.0 | 131 |
import colander
from flask import Blueprint, render_template, request, redirect, url_for
from flask.ext.login import current_user, login_user, logout_user
from flask import current_app
from sqlalchemy.sql.expression import desc, func, or_
from werkzeug.security import check_password_hash, generate_password_hash
from flaskboiler.core import db, login_manager
from flaskboiler.auth import require
from flaskboiler.model.dataset import Dataset
from flaskboiler.model.account import (Account, AccountRegister,
AccountSettings)
from flaskboiler.lib.jsonexport import jsonify
from flaskboiler.lib.helpers import flash_error
from flaskboiler.lib.helpers import flash_notice, flash_success
from wtforms import Form, TextField, PasswordField, validators
blueprint = Blueprint('account', __name__)
@login_manager.request_loader
def load_user_from_request(request):
api_key = request.args.get('api_key')
if api_key and len(api_key):
account = Account.by_api_key(api_key)
if account:
return account
api_key = request.headers.get('Authorization')
if api_key and len(api_key) and ' ' in api_key:
method, api_key = api_key.split(' ', 1)
if method.lower() == 'apikey':
account = Account.by_api_key(api_key)
if account:
return account
return None
@blueprint.route('/login', methods=['GET'])
def login():
""" Render the login/registration page. """
disable_cache()
return render_template('account/login.jade')
@blueprint.route('/login', methods=['POST', 'PUT'])
def login_perform():
account = Account.by_email(request.form.get('login'))
#if account is not None and account.verified == True:
if account is not None:
if check_password_hash(account.password, request.form.get('password')):
logout_user()
login_user(account, remember=True)
flash_success("Welcome back, " + account.fullname + "!")
return redirect(url_for('home.index'))
flash_error(_("Incorrect user name or password!"))
return login()
@blueprint.route('/register', methods=['POST', 'PUT'])
def register():
""" Perform registration of a new user """
disable_cache()
errors, values = {}, dict(request.form.items())
try:
# Grab the actual data and validate it
data = AccountRegister().deserialize(values)
        # Check if the email is already registered.
        # If it is, then send the email hash for the login.
        # Check that the email is real.
        # Get the domain.
print data['email']
if (data['email'].find('@') == -1 or data['email'].find('.') == -1):
raise colander.Invalid(AccountRegister.email,
"You must use a valid USG email address")
domain = data['email'][data['email'].find('@') + 1:]
if 'EMAIL_WHITELIST' not in current_app.config.keys():
raise colander.Invalid(AccountRegister.email,
"System not set correctly. Please contact the administrator.")
domainvalid = False
for domainemail in current_app.config['EMAIL_WHITELIST']:
if domain.lower() == domainemail.lower():
domainvalid = True
if not domainvalid:
raise colander.Invalid(AccountRegister.email,
"Your email is not available for registration. Currently it is only available for US Government emails.")
# Check if the username already exists, return an error if so
if Account.by_email(data['email']):
#resend the hash here to the email and notify the user
raise colander.Invalid(
AccountRegister.email,
"Login Name already exists. Click reset password.")
# Create the account
account = Account()
account.fullname = data['fullname']
account.email = data['email']
db.session.add(account)
db.session.commit()
# Perform a login for the user
#login_user(account, remember=True)
sendhash(account)
# TO DO redirect to email sent page
return redirect(url_for('account.email_message', id=account.id))
except colander.Invalid as i:
errors = i.asdict()
return render_template('account/login.jade', form_fill=values,
form_errors=errors)
| nathanhilbert/flaskboiler | flaskboiler/views/account.py | Python | agpl-3.0 | 4,417 |
from ase import Atoms
from gpaw import GPAW
from gpaw.utilities.sic import NSCFSIC
atoms = ['He','Be'] #,'Ne'] # Ne deviates already 2.5 eV
EE = []
EREF = [-79.4,-399.8,-3517.6]
for a in atoms:
s = Atoms(a)
s.center(vacuum=4.0)
calc = GPAW(h=0.15, txt=a + '.txt')
s.set_calculator(calc)
E = s.get_potential_energy()
EE.append(NSCFSIC(calc).calculate())
print "Difference to table VI of Phys. Rev. B 23, 5048 in eV"
#http://prola.aps.org/abstract/PRB/v23/i10/p5048_1
print "%10s%10s%10s%10s" % ("atom", "ref.", "gpaw", "diff")
for a, er, e in zip(atoms, EREF, EE):
print "%10s%10.2f%10.2f%10.2f" % (a, er, e, er-e)
assert abs(er-e)<0.1
    # Arbitrary 0.1 eV tolerance for non-self-consistent SIC
# Note that Ne already deviates 2.5 eV
| qsnake/gpaw | gpaw/test/nscfsic.py | Python | gpl-3.0 | 774 |
# Generated by Django 2.2.16 on 2020-09-23 18:08
from django.db import migrations, models
import django_jsonfield_backport.models
class Migration(migrations.Migration):
dependencies = [
('reviewers', '0008_auto_20200730_1335'),
]
operations = [
migrations.AlterField(
model_name='autoapprovalsummary',
name='weight_info',
field=django_jsonfield_backport.models.JSONField(default=dict, null=True),
),
]
| bqbn/addons-server | src/olympia/reviewers/migrations/0009_auto_20200923_1808.py | Python | bsd-3-clause | 483 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import io, urllib.request, urllib.error, time, datetime
import pandas as pd
import sqlite3
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
# REF [site] >> https://tariat.tistory.com/892
def simple_example_1():
"""
	Korea Exchange (KRX) XML service URLs (단축종목코드 = short ticker code).
	1. Real-time quotes (Korean).
	http://asp1.krx.co.kr/servlet/krx.asp.XMLSise?code=단축종목코드
	2. Real-time quotes (English).
	http://asp1.krx.co.kr/servlet/krx.asp.XMLSiseEng?code=단축종목코드
	3. Disclosure information (Korean/English).
	http://asp1.krx.co.kr/servlet/krx.asp.DisList4MainServlet?code=단축코드&gubun=K (K: Korean / E: English)
	4. Financial summary (Korean).
	http://asp1.krx.co.kr/servlet/krx.asp.XMLJemu?code=단축종목코드
	5. Financial summary (English).
	http://asp1.krx.co.kr/servlet/krx.asp.XMLJemuEng?code=단축종목코드
	6. Financial summary 2 (Korean).
	http://asp1.krx.co.kr/servlet/krx.asp.XMLJemu2?code=단축종목코드
	7. Financial summary 3 (Korean).
	http://asp1.krx.co.kr/servlet/krx.asp.XMLJemu3?code=단축종목코드
	8. Text.
	http://asp1.krx.co.kr/servlet/krx.asp.XMLText?code=단축종목코드
"""
def get_stock_from_krx(stock_code, try_cnt):
try:
url = "http://asp1.krx.co.kr/servlet/krx.asp.XMLSiseEng?code={}".format(stock_code)
req = urllib.request.urlopen(url)
result = req.read()
xmlsoup = BeautifulSoup(result, "lxml-xml")
stock = xmlsoup.find("TBL_StockInfo")
stock_df = pd.DataFrame(stock.attrs, index=[0])
stock_df = stock_df.applymap(lambda x: x.replace(",", ""))
return stock_df
except urllib.error.HTTPError as ex:
print("urllib.error.HTTPError raised: {}.".format(ex))
if try_cnt >= 3:
return None
else:
return get_stock_from_krx(stock_code, try_cnt=try_cnt + 1)
# Save to DB.
con = sqlite3.connect("./krx.db")
stock_codes = ["005930", "066570"]
for sc in tqdm(stock_codes):
stock_df = get_stock_from_krx(sc, 1)
stock_df.to_sql(con=con, name="div_stock_sise", if_exists="append")
time.sleep(0.5)
con.close()
def get_daily_price(date):
gen_otp_url = "http://marketdata.krx.co.kr/contents/COM/GenerateOTP.jspx"
gen_otp_data = {
"name": "fileDown",
"filetype": "csv",
"market_gubun": "ALL",
"url": "MKD/04/0404/04040200/mkd04040200_01",
"indx_ind_cd": "",
"sect_tp_cd": "ALL",
"schdate": date,
"pagePath": "/contents/MKD/04/0404/04040200/MKD04040200.jsp"
}
headers = {
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"x-requested-with": "XMLHttpRequest"
}
r = requests.get(gen_otp_url, headers=headers, params=gen_otp_data)
code = r.text
down_url = "http://file.krx.co.kr/download.jspx"
down_data = {
"code": code,
}
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7",
"cache-control": "max-age=0",
"content-length": "417",
"content-type": "application/x-www-form-urlencoded",
"origin": "https://marketdata.krx.co.kr",
"referer": "https://marketdata.krx.co.kr/",
"sec-fetch-dest": "iframe",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "same-site",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
}
r = requests.post(down_url, data=down_data, headers=headers)
r.encoding = "utf-8-sig"
df = pd.read_csv(io.BytesIO(r.content), header=0, thousands=",")
#print(df)
return df
# REF [site] >> https://leesunkyu94.github.io/투자%20전략/divdend_stra/
def simple_example_2():
for i in range(0, 5):
date = (datetime.datetime.today() - datetime.timedelta(days=i)).strftime("%Y%m%d")
data_df = get_daily_price(date)
print(i, date)
if data_df.shape[0] != 0:
data_df.to_csv("./krx_{}.csv".format(date), encoding="CP949", index=False)
# REF [site] >> https://github.com/sharebook-kr/pykrx
def pykrx_market_data_example():
import pykrx.stock
if False:
#tickers = pykrx.stock.get_market_ticker_list()
#tickers = pykrx.stock.get_market_ticker_list("20190225")
tickers = pykrx.stock.get_market_ticker_list("20190225", market="KOSDAQ")
print(tickers)
for ticker in pykrx.stock.get_market_ticker_list():
ticker_name = pykrx.stock.get_market_ticker_name(ticker)
print(ticker_name)
if False:
df = pykrx.stock.get_market_ohlcv_by_date("20150720", "20150810", "005930")
#df = pykrx.stock.get_market_ohlcv_by_date("20180810", "20181212", "005930", freq="m")
print(df.head(3))
for ticker in pykrx.stock.get_market_ticker_list():
df = pykrx.stock.get_market_ohlcv_by_date("20181210", "20181212", ticker)
print(df.head())
time.sleep(1)
#--------------------
if False:
df = pykrx.stock.get_market_ohlcv_by_ticker("20210122")
#df = pykrx.stock.get_market_ohlcv_by_ticker("20200831", market="KOSPI")
#df = pykrx.stock.get_market_ohlcv_by_ticker("20200831", market="KOSDAQ")
#df = pykrx.stock.get_market_ohlcv_by_ticker("20200831", market="KONEX")
print(df.head(3))
df = pykrx.stock.get_market_price_change_by_ticker("20180301", "20180320")
print(df.head(2))
#--------------------
# DIV/BPS/PER/EPS.
if False:
df = pykrx.stock.get_market_fundamental_by_ticker("20210108")
#df = pykrx.stock.get_market_fundamental_by_ticker("20210104", market="KOSDAQ")
print(df.head(2))
df = pykrx.stock.get_market_fundamental_by_date("20210104", "20210108", "005930")
#df = pykrx.stock.get_market_fundamental_by_date("20200101", "20200430", "005930", freq="m")
print(df.head(2))
#--------------------
if False:
df = pykrx.stock.get_market_trading_value_by_date("20210115", "20210122", "005930")
#df = pykrx.stock.get_market_trading_value_by_date("20210115", "20210122", "005930", on="매도")
#df = pykrx.stock.get_market_trading_value_by_date("20210115", "20210122", "KOSPI")
#df = pykrx.stock.get_market_trading_value_by_date("20210115", "20210122", "KOSPI", etf=True, etn=True, elw=True)
#df = pykrx.stock.get_market_trading_value_by_date("20210115", "20210122", "KOSPI", etf=True, etn=True, elw=True, detail=True)
print(df.head(2))
#--------------------
if False:
df = pykrx.stock.get_market_trading_volume_by_date("20210115", "20210122", "005930")
#df = pykrx.stock.get_market_trading_volume_by_date("20210115", "20210122", "005930", on="매도")
#df = pykrx.stock.get_market_trading_volume_by_date("20210115", "20210122", "KOSPI")
#df = pykrx.stock.get_market_trading_volume_by_date("20210115", "20210122", "KOSPI", etf=True, etn=True, elw=True)
#df = pykrx.stock.get_market_trading_volume_by_date("20210115", "20210122", "KOSPI", etf=True, etn=True, elw=True, detail=True)
print(df.head())
#--------------------
if False:
df = pykrx.stock.get_market_trading_value_by_investor("20210115", "20210122", "005930")
#df = pykrx.stock.get_market_trading_value_by_investor("20210115", "20210122", "KOSPI")
#df = pykrx.stock.get_market_trading_value_by_investor("20210115", "20210122", "KOSPI", etf=True, etn=True, elw=True)
print(df.head())
#--------------------
if False:
df = pykrx.stock.get_market_trading_volume_by_investor("20210115", "20210122", "005930")
#df = pykrx.stock.get_market_trading_volume_by_investor("20210115", "20210122", "KOSPI")
#df = pykrx.stock.get_market_trading_volume_by_investor("20210115", "20210122", "KOSPI", etf=True, etn=True, elw=True)
print(df.head())
#--------------------
if False:
df = pykrx.stock.get_market_net_purchases_of_equities_by_ticker("20210115", "20210122", "KOSPI", "개인")
print(df.head())
#--------------------
if False:
df = pykrx.stock.get_market_cap_by_ticker("20200625")
print(df.head())
df = pykrx.stock.get_market_cap_by_date("20190101", "20190131", "005930")
#df = pykrx.stock.get_market_cap_by_date("20200101", "20200430", "005930", freq="m")
print(df.head())
#--------------------
if False:
df = pykrx.stock.get_exhaustion_rates_of_foreign_investment_by_ticker("20200703")
#df = pykrx.stock.get_exhaustion_rates_of_foreign_investment_by_ticker("20200703", "KOSPI")
#df = pykrx.stock.get_exhaustion_rates_of_foreign_investment_by_ticker("20200703", "KOSPI", balance_limit=True)
print(df.head())
df = pykrx.stock.get_exhaustion_rates_of_foreign_investment_by_date("20210108", "20210115", "005930")
print(df.head())
# REF [site] >> https://github.com/sharebook-kr/pykrx
def pykrx_index_example():
import pykrx.stock
tickers = pykrx.stock.get_index_ticker_list()
tickers = pykrx.stock.get_index_ticker_list("19800104")
tickers = pykrx.stock.get_index_ticker_list(market="KOSDAQ")
print(tickers)
for ticker in pykrx.stock.get_index_ticker_list():
print(ticker, pykrx.stock.get_index_ticker_name(ticker))
pdf = pykrx.stock.get_index_portfolio_deposit_file("1005")
print(len(pdf), pdf)
df = pykrx.stock.get_index_ohlcv_by_date("20190101", "20190228", "1028")
#df = pykrx.stock.get_index_ohlcv_by_date("20190101", "20190228", "1028", freq="m")
print(df.head(2))
df = pykrx.stock.get_index_listing_date("KOSPI")
print(df.head())
df = pykrx.stock.get_index_price_change_by_ticker("20200520", "20200527", "KOSDAQ")
print(df.head())
# REF [site] >> https://github.com/sharebook-kr/pykrx
def pykrx_short_stock_selling_example():
import pykrx.stock
df = pykrx.stock.get_shorting_status_by_date("20181210", "20181212", "005930")
print(df)
df = pykrx.stock.get_shorting_volume_by_ticker("20210125")
#df = pykrx.stock.get_shorting_volume_by_ticker("20210125", "KOSDAQ")
#df = pykrx.stock.get_shorting_volume_by_ticker("20210125", include=["주식", "ELW"])
print(df.head())
df = pykrx.stock.get_shorting_volume_by_date("20210104", "20210108", "005930")
print(df.head(3))
df = pykrx.stock.get_shorting_investor_volume_by_date("20190401", "20190405", "KOSPI")
#df = pykrx.stock.get_shorting_investor_volume_by_date("20190401", "20190405", "KOSDAQ")
print(df.head())
df = pykrx.stock.get_shorting_investor_value_by_date("20190401", "20190405", "KOSPI")
#df = pykrx.stock.get_shorting_investor_value_by_date("20190401", "20190405", "KOSDAQ")
print(df.head())
df = pykrx.stock.get_shorting_balance_by_date("20190401", "20190405", "005930")
print(df.head())
df = pykrx.stock.get_shorting_volume_top50("20210129")
#df = pykrx.stock.get_shorting_volume_top50("20210129", "KOSDAQ")
print(df.head())
df = pykrx.stock.get_shorting_balance_top50("20210127")
#df = pykrx.stock.get_shorting_balance_top50("20210129", market="KOSDAQ")
print(df.head())
# REF [site] >> https://github.com/sharebook-kr/pykrx
def pykrx_etx_example():
import pykrx.stock
#--------------------
# ETF.
tickers = pykrx.stock.get_etf_ticker_list("20200717")
print(tickers[:10])
tickers = pykrx.stock.get_etf_ticker_list("20021014")
for ticker in tickers:
print(pykrx.stock.get_etf_ticker_name(ticker))
df = pykrx.stock.get_etf_ohlcv_by_date("20210104", "20210108", "292340")
df = pykrx.stock.get_etf_ohlcv_by_date("20200101", "20200531", "292340", freq="m")
print(df.head())
df = pykrx.stock.get_etf_ohlcv_by_ticker("20210325")
print(df.head())
df = pykrx.stock.get_etf_price_change_by_ticker("20210325", "20210402")
print(df.head())
df = pykrx.stock.get_etf_portfolio_deposit_file("152100")
#df = pykrx.stock.get_etf_portfolio_deposit_file("152100", "20161206")
print(df.head())
df = pykrx.stock.get_etf_price_deviation("20200101", "20200401", "295820")
print(df.head())
df = pykrx.stock.get_etf_tracking_error("20210104", "20210108", "295820")
print(df.head())
#--------------------
# ETN.
tickers = pykrx.stock.get_etn_ticker_list("20141215")
print(tickers)
for ticker in tickers:
print(pykrx.stock.get_etn_ticker_name(ticker))
#--------------------
# ELW.
tickers = pykrx.stock.get_elw_ticker_list("20200306")
print(tickers)
for ticker in tickers:
print(pykrx.stock.get_elw_ticker_name(ticker))
# REF [site] >> https://github.com/sharebook-kr/pykrx
def pykrx_bond_example():
import pykrx.website.krx.bond
kb = pykrx.website.krx.bond.KrxBond()
df = kb.get_treasury_yields_in_kerb_market("20190208")
print(df)
def main():
#simple_example_1() # Not working.
#simple_example_2() # Not correctly working.
pykrx_market_data_example()
#pykrx_index_example()
#pykrx_short_stock_selling_example()
#pykrx_etx_example()
#pykrx_bond_example()
#--------------------------------------------------------------------
if "__main__" == __name__:
main()
| sangwook236/SWDT | sw_dev/python/rnd/test/finance/krx_test.py | Python | gpl-3.0 | 12,724 |
"""
@date 2014-11-16
@author Hong-She Liang <[email protected]>
"""
import io
import types
import time
import functools
import base64
import copy
import rabird.core.cstring as cstring
from PIL import Image
from selenium.common.exceptions import (
NoSuchElementException,
WebDriverException,
StaleElementReferenceException,
NoSuchFrameException,
)
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from rabird.selenium import expected_conditions as EC
from rabird.selenium import validators as V
from ..utils import merge_kwargs, verify_xpath, get_current_func
def _execute_with_switch_frame(self, function):
if hasattr(self, "_parent_frame_path") and (
len(self._parent_frame_path) > 0
):
self._parent.switch_to.default_content()
try:
self._parent.switch_to.frame(self._parent_frame_path)
            # Try to scroll the element into view before executing any function
_do_scroll_into_view(self)
result = function()
finally:
self._parent.switch_to.default_content()
else:
        # Try to scroll the element into view before executing any function
_do_scroll_into_view(self)
result = function()
return result
def get_attribute(self, name):
function = functools.partial(self._old_get_attribute, name)
return _execute_with_switch_frame(self, function)
def set_attribute(self, name, value):
value = cstring.escape(value)
script = "arguments[0].setAttribute('%s', '%s');" % (name, value)
function = functools.partial(self._parent.execute_script, script, self)
_execute_with_switch_frame(self, function)
return self
def __get_driver(self):
if isinstance(self, WebDriver):
driver = self
else:
driver = self._parent
return driver
def _xpath_find_decl(
value=None,
validators=None,
is_find_all=False,
parent_frame_path=None,
**kwargs
):
"""Only xpath parameters declaration of xpath_find related function.
"""
pass
def xpath_find(self, *args, **kwargs):
return self.find_element_recursively(
By.XPATH, *args, is_find_all=False, **kwargs
)[0]
def xpath_find_all(self, *args, **kwargs):
return self.find_element_recursively(
By.XPATH, *args, is_find_all=True, **kwargs
)
def xpath_wait(self, *args, **kwargs):
"""
    A simple helper that waits for an element matching the given XPath expression to appear.
"""
if "timeout" in kwargs:
timeout = kwargs["timeout"]
del kwargs["timeout"]
else:
timeout = __get_driver(self).get_xpath_wait_timeout()
    # Because WebDriverWait() ignores all exceptions, even
    # InvalidSelectorException, we first check that the xpath pattern is
    # valid. If the xpath pattern fails verification, we must not enter the
    # wait loop and should report the error to the user instead.
merged_kwargs = merge_kwargs(_xpath_find_decl, args, kwargs)
verify_xpath(merged_kwargs["value"])
return WebDriverWait(__get_driver(self), timeout).until(
EC.xpath_find(*args, **kwargs)
)
def xpath_wait_all(self, *args, **kwargs):
if "timeout" in kwargs:
timeout = kwargs["timeout"]
del kwargs["timeout"]
else:
timeout = __get_driver(self).get_xpath_wait_timeout()
    # Because WebDriverWait() ignores all exceptions, even
    # InvalidSelectorException, we first check that the xpath pattern is
    # valid. If the xpath pattern fails verification, we must not enter the
    # wait loop and should report the error to the user instead.
merged_kwargs = merge_kwargs(_xpath_find_decl, args, kwargs)
verify_xpath(merged_kwargs["value"])
return WebDriverWait(__get_driver(self), timeout).until(
EC.xpath_find_all(*args, **kwargs)
)
def _force_hover(self):
hover = ActionChains(self._parent).move_to_element(self)
hover.perform()
return self
def force_hover(self):
function = functools.partial(_force_hover, self)
_execute_with_switch_frame(self, function)
return self
def force_focus(self):
function = functools.partial(
self._parent.execute_script, "arguments[0].focus();", self
)
_execute_with_switch_frame(self, function)
return self
def force_click(self):
function = functools.partial(
self._parent.execute_script, "arguments[0].click();", self
)
_execute_with_switch_frame(self, function)
return self
def _do_scroll_into_view(self):
self._parent.execute_script("arguments[0].scrollIntoView(true);", self)
def scroll_into_view(self):
function = functools.partial(_do_scroll_into_view, self)
_execute_with_switch_frame(self, function)
return self
def _execute(self, command, params=None):
function = functools.partial(self._old_execute, command, params)
return _execute_with_switch_frame(self, function)
def _filter_elements(driver, elements, validators):
"""Becareful that this method will not switch to it's frame ! So you must
ensure you are in the correct frame currently.
"""
# Only filter elements if validators not empty!
if (
(validators is None)
or (isinstance(validators, list) and (len(validators) <= 0))
or (not elements)
):
return elements
result = []
    # Apply each validator to each element and keep only the elements that pass.
for element in elements:
for validator in validators:
if validator(element):
result.append(element)
return result
def _find_element_recursively(
self,
by=By.ID,
value=None,
validators=None,
is_find_all=False,
parent_frame_path=None,
**kwargs
):
"""Recursively to find elements.
@param validators: Only accept validators.
@return Return element list while successed, otherwise raise exception
NoSuchElementException .
"""
if parent_frame_path is None:
parent_frame_path = list()
if validators is None:
validators = V.And()
elif not isinstance(validators, V.Operator):
validators = V.And(*validators)
if isinstance(self, WebDriver):
driver = self
else:
driver = self._parent
# If "self" is an element and parent_frame_path do not have any
# elements, we should inhert the frame path from "self".
if hasattr(self, "_parent_frame_path") and (
len(parent_frame_path) <= 0
):
parent_frame_path = self._parent_frame_path
# Initialize first frame path to current window handle
if len(parent_frame_path) <= 0:
parent_frame_path += [driver.current_window_handle]
else:
        # FIXME I don't know why, but the iframe can still be found even
        # after we switched into that iframe??? It seems the switch
        # behavior failed!
iframe_elements = self.find_elements(By.TAG_NAME, "iframe")
if parent_frame_path[-1] in iframe_elements:
raise NoSuchElementException()
try:
last_exception = NoSuchElementException(
"by: %s, value: %s" % (by, value)
)
founded_elements = []
try:
if is_find_all:
founded_elements = self.find_elements(by, value)
else:
founded_elements = [self.find_element(by, value)]
founded_elements = _filter_elements(
driver, founded_elements, validators
)
for element in founded_elements:
element._parent_frame_path = parent_frame_path
except NoSuchElementException as e:
last_exception = e
        # If only one element is needed ...
if is_find_all or (len(founded_elements) <= 0):
            # We must invoke self's own find_elements method here so that the
            # search stays scoped to this element rather than the whole HTML.
try:
elements = []
elements = self.find_elements(By.TAG_NAME, "iframe")
except WebDriverException:
# If window is switching or not ready, WebDriverException will
# happen.
pass
for element in elements:
try:
temporary_frame_path = parent_frame_path + [element]
driver.switch_to.frame(temporary_frame_path)
try:
                    # Here we must use the driver to find elements, because we
                    # have already switched into the frame and need to search
                    # the whole frame area.
founded_elements += _find_element_recursively(
self,
by,
value,
validators,
is_find_all,
temporary_frame_path,
**kwargs
)
if not is_find_all:
break
except NoSuchElementException as e:
last_exception = e
except (
StaleElementReferenceException,
NoSuchFrameException,
) as e:
                # Sometimes the frame is stale or missing even though the
                # 'iframe' element was found earlier.
print(
"Can't find stale iframe : %s! Current Window Handle : %s"
% (temporary_frame_path, driver.current_window_handle)
)
last_exception = e
if (not is_find_all) and (len(founded_elements) <= 0):
            # No element was found; raise the last exception if we only
            # wanted to find a single element!
raise last_exception
return founded_elements
finally:
        # Avoid staying in a specific frame after the last find_element().
driver.switch_to.default_content()
def _has_visible_validator(validators):
for validator in validators:
if not isinstance(validator, V.Operator):
if isinstance(validator, V.VisibleOf):
return True
continue
if _has_visible_validator(validator):
return True
return False
def find_element_recursively(
self,
by=By.ID,
value=None,
validators=[],
is_find_all=False,
*args,
**kwargs
):
if isinstance(self, WebDriver):
driver = self
else:
driver = self._parent
if not _has_visible_validator(validators):
        # By default, we only check visible elements.
        # Think about it: most behaviors are performed on visible elements,
        # not on hidden elements!
validators.append(V.Visible())
founded_elements = []
    # Recurse into windows
last_exception = NoSuchElementException()
old_handle = driver.current_window_handle
try:
handles = driver.window_handles
for handle in handles:
driver.switch_to.window(handle)
try:
founded_elements += _find_element_recursively(
self, by, value, validators, is_find_all, *args, **kwargs
)
if (not is_find_all) and (len(founded_elements) > 0):
break
except NoSuchElementException as e:
                # Continue searching if there is no matching element in this
                # specific window.
last_exception = e
finally:
driver.switch_to.window(old_handle)
if (len(founded_elements) <= 0) and (not is_find_all):
        # If no window had any matching elements, raise the last exception
        # (someone must have raised one!)
raise last_exception
return founded_elements
def remove(self):
script = """
var element = arguments[0];
element.parentNode.removeChild(element);
"""
function = functools.partial(self._parent.execute_script, script, self)
_execute_with_switch_frame(self, function)
def get_rect(self):
"""
    Emulated rect property that works across webdrivers.
    The original method raises an unknown-command exception in most webdrivers.
"""
rect = copy.deepcopy(self.location)
rect.update(self.size)
return rect
def get_absolute_location(self):
"""
    Get the element's location relative to the whole web page.
    The original location property only returns the location relative to the
    frame containing the element.
"""
location = self.location
if hasattr(self, "_parent_frame_path") and (
len(self._parent_frame_path) > 0
):
last_frame = self._parent_frame_path[-1]
frame_list = self._parent_frame_path[:-1]
        # Sum up the parent frames' locations (a frame's location is itself
        # relative to the frame containing it).
count = len(frame_list)
for i in range(count, 0, -1):
self.parent.switch_to.frame(frame_list[:i])
frame_location = last_frame.location
location["x"] += frame_location["x"]
location["y"] += frame_location["y"]
last_frame = frame_list[-1]
return location
def screenshot_as_base64(self):
"""
An emulated element screenshot method.
    The original screenshot_as_base64() and screenshot_as_png() are newer
    features that most webdrivers do not support, so we provide a more
    roundabout way to achieve the same goal with the same interface, in the
    hope that it works with all webdrivers.
"""
self.scroll_into_view()
image_data = self.parent.get_screenshot_as_png()
location = get_absolute_location(self)
# Compatible way to get scroll x and y.
# Reference to :
# https://developer.mozilla.org/en-US/docs/Web/API/Window/scrollX
scroll_x = self.parent.execute_script(
"return (window.pageXOffset !== undefined) ? window.pageXOffset : ("
"document.documentElement || "
"document.body.parentNode || "
"document.body).scrollLeft;"
)
scroll_y = self.parent.execute_script(
"return (window.pageYOffset !== undefined) ? window.pageYOffset : ("
"document.documentElement || "
"document.body.parentNode || "
"document.body).scrollTop;"
)
size = self.size
image = Image.open(io.BytesIO(image_data))
left = location["x"] - scroll_x
    # FIXME: Why subtract 150? No, don't ask me, it just works!
top = location["y"] - scroll_y - 150
right = left + size["width"]
bottom = top + size["height"]
stream = io.BytesIO()
image = image.crop((int(left), int(top), int(right), int(bottom)))
image.save(stream, format="PNG")
return base64.b64encode(stream.getvalue()).decode("ascii")
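# A minimal, hedged usage sketch (not part of the original module): these
# helpers are meant to be attached to selenium's WebDriver/WebElement classes
# by the package's initialization code. Whether a plain import of the
# overrides package performs that attachment is an assumption here, as is the
# local availability of a chromedriver.
#
#   from selenium import webdriver
#   import rabird.selenium.overrides  # assumed to install these helpers
#
#   driver = webdriver.Chrome()
#   driver.get("https://example.org/")
#   # Searches the page and every nested iframe for the first visible match.
#   element = driver.xpath_find("//a[@id='login']")
#   element.force_click()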
| starofrainnight/rabird.selenium | rabird/selenium/overrides/webelement.py | Python | apache-2.0 | 14,878 |
#!/usr/bin/env python
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
# create.py: Create a new bug report
from __future__ import print_function
import time
import bugzilla
# public test instance of bugzilla.redhat.com.
#
# Don't worry, changing things here is fine, and won't send any email to
# users or anything. It's what partner-bugzilla.redhat.com is for!
URL = "partner-bugzilla.redhat.com"
bzapi = bugzilla.Bugzilla(URL)
if not bzapi.logged_in:
print("This example requires cached login credentials for %s" % URL)
bzapi.interactive_login()
# Similar to build_query, build_createbug is a helper function that handles
# some bugzilla version incompatibility issues. All it does is return a
# properly formatted dict(), and provide friendly parameter names.
# The argument names map to those accepted by XMLRPC Bug.create:
# https://bugzilla.readthedocs.io/en/latest/api/core/v1/bug.html#create-bug
#
# The arguments specified here are mandatory, but there are many other
# optional ones like op_sys, platform, etc. See the docs
createinfo = bzapi.build_createbug(
product="Fedora",
version="rawhide",
component="python-bugzilla",
summary="new example python-bugzilla bug %s" % time.time(),
description="This is comment #0 of an example bug created by "
"the python-bugzilla.git examples/create.py script.")
newbug = bzapi.createbug(createinfo)
print("Created new bug id=%s url=%s" % (newbug.id, newbug.weburl))
| wgwoods/python-bugzilla | examples/create.py | Python | gpl-2.0 | 1,528 |
import numpy as np
from sympy import Rational as frac
from sympy import sqrt
from ..helpers import article, expand_symmetries, fsd, untangle, z
from ._helpers import CnScheme
_source = article(
authors=["Preston C. Hammer", "Arthur H. Stroud"],
title="Numerical Evaluation of Multiple Integrals II",
journal="Math. Comp.",
volume="12",
year="1958",
pages="272-280",
url="https://doi.org/10.1090/S0025-5718-1958-0102176-6",
)
def hammer_stroud_1n(n):
d = {"a0": [[frac(1, 2 * n)], [sqrt(frac(n, 3))]]}
points, weights = expand_symmetries(d, n)
return CnScheme("Hammer-Stroud 1n", n, weights, points, 3, _source, 5.863e-14)
def hammer_stroud_2n(n):
r = sqrt(frac(3, 5))
data = [
(frac(25 * n ** 2 - 115 * n + 162, 162), z(n)),
(frac(70 - 25 * n, 162), fsd(n, (r, 1))),
(frac(25, 324), fsd(n, (r, 2))),
]
points, weights = untangle(data)
points = np.ascontiguousarray(points.T)
return CnScheme("Hammer-Stroud 2n", n, weights, points, 5, _source, 3.820e-14)
| nschloe/quadpy | src/quadpy/cn/_hammer_stroud.py | Python | mit | 1,048 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Unit tests for deposit/record minters."""
from __future__ import absolute_import, print_function
from uuid import uuid4
import pytest
from invenio_pidstore.errors import PIDDoesNotExistError
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from zenodo.modules.deposit.minters import zenodo_deposit_minter
from zenodo.modules.records.minters import zenodo_record_minter
def test_double_minting_depid_recid(db):
"""Test using same integer for dep/rec ids."""
dep_uuid = uuid4()
data = dict()
pid = zenodo_deposit_minter(dep_uuid, data)
# Assert values added to data
assert data['_deposit']['id'] == '1'
assert data['recid'] == 1
assert 'doi' not in data
# Assert pid values
assert pid.pid_type == 'depid'
assert pid.pid_value == '1'
assert pid.status == PIDStatus.REGISTERED
assert pid.object_uuid == dep_uuid
# Assert reservation of recid.
assert PersistentIdentifier.get('recid', pid.pid_value).status \
== PIDStatus.RESERVED
db.session.commit()
# Assert registration of recid.
rec_uuid = uuid4()
pid = zenodo_record_minter(rec_uuid, data)
assert pid.pid_type == 'recid'
assert pid.pid_value == '1'
assert pid.status == PIDStatus.REGISTERED
assert pid.object_uuid == rec_uuid
assert data['doi'] == '10.5072/zenodo.1'
assert data['_oai']['id'] == 'oai:zenodo.org:1'
@pytest.mark.parametrize('doi_in, doi_out', [
# ('10.1234/foo', '10.1234/foo'),
# ('10.5072/foo', '10.5072/foo'),
(None, '10.5072/zenodo.1'),
])
def test_doi_minting(db, doi_in, doi_out):
"""Test using same integer for dep/rec ids."""
dep_uuid, rec_uuid = uuid4(), uuid4()
data = dict(doi=doi_in)
zenodo_deposit_minter(dep_uuid, data)
zenodo_record_minter(rec_uuid, data)
db.session.commit()
pid = PersistentIdentifier.get('doi', doi_out)
assert pid.object_uuid == rec_uuid
assert pid.status == PIDStatus.RESERVED
@pytest.mark.parametrize('doi', [
'1234/foo',
'a',
])
def test_invalid_doi(db, doi):
"""Test using same integer for dep/rec ids."""
dep_uuid = uuid4()
data = dict(doi=doi)
zenodo_deposit_minter(dep_uuid, data)
assert PersistentIdentifier.query.count() == 2
def test_unpublished_deposit_and_pid_deletion(deposit):
"""Test deletion of deposit and pid."""
recid = PersistentIdentifier.get('recid', str(deposit['recid']))
assert recid and recid.status == PIDStatus.RESERVED
assert not recid.has_object()
depid = PersistentIdentifier.get('depid', str(deposit['_deposit']['id']))
assert depid and depid.status == PIDStatus.REGISTERED
assert depid.has_object()
# Delete deposit
deposit.delete()
pytest.raises(
PIDDoesNotExistError,
PersistentIdentifier.get,
'recid', str(deposit['recid'])
)
depid = PersistentIdentifier.get('depid', str(deposit['_deposit']['id']))
assert depid and depid.status == PIDStatus.DELETED
def test_published_external_doi(db, deposit, deposit_file):
"""Test published external DOI."""
ext_doi1 = '10.1234/foo'
ext_doi2 = '10.1234/bar'
deposit['doi'] = ext_doi1
deposit.publish()
db.session.commit()
# Published record with external DOI must have:
# 1) a registered recid with object
recid = PersistentIdentifier.get('recid', str(deposit['recid']))
assert recid and recid.status == PIDStatus.REGISTERED \
and recid.has_object()
# 2) a reserved external doi with object
doi = PersistentIdentifier.get('doi', ext_doi1)
assert doi and doi.status == PIDStatus.RESERVED \
and doi.has_object()
# Now change external DOI.
deposit = deposit.edit()
deposit['doi'] = ext_doi2
deposit.publish()
db.session.commit()
# Ensure DOI 1 has been removed.
pytest.raises(
PIDDoesNotExistError, PersistentIdentifier.get, 'doi', ext_doi1)
# Ensure DOI 2 has been reserved.
doi = PersistentIdentifier.get('doi', ext_doi2)
assert doi and doi.status == PIDStatus.RESERVED \
and doi.has_object()
| tiborsimko/zenodo | tests/unit/deposit/test_minters.py | Python | gpl-2.0 | 4,994 |
import gzip
import math
import numpy as np
class Renderer:
def reset(self):
self.fmt = getvar('pixel_buffer_format')
self.width = int(getvar('pixel_buffer_width'))
self.height = int(getvar('pixel_buffer_height'))
self.period = getvar('period')
with gzip.open('background.gz') as fp:
self.bg = np.frombuffer(fp.read(), dtype=np.uint8)
if self.fmt in ('RGB8', 'sRGB8'):
self.bg = self.bg[np.arange(self.bg.shape[0]) % 4 != 3]
self.bg = np.frombuffer(self.bg, dtype=np.dtype('B, B, B'))
elif self.fmt in ('RGBA8', 'sRGBA8'):
self.bg = np.frombuffer(self.bg, dtype=np.dtype('B, B, B, B'))
elif self.fmt == 'RGBA16F':
self.bg = self.bg.astype(np.float16)
self.bg /= 255.0
self.bg = np.frombuffer(self.bg, dtype=np.dtype('f2, f2, f2, f2'))
else:
raise RuntimeError('Invalid buffer format: %r' % self.fmt)
self.bg = self.bg.reshape((self.height, self.width))
def render(self):
elapsed_time = getvar('elapsed_time')
pos = 0.5 * (1.0 - math.cos(2.0 * math.pi * elapsed_time / self.period))
return np.roll(self.bg,
(round(self.height * pos), round(self.width * pos)),
(0, 1))
r = Renderer()
reset = r.reset
render = r.render
del r
| mworks/mworks | examples/Tests/Stimulus/PythonImage/image_gen.py | Python | mit | 1,385 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import json
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pybossa-discourse'
copyright = '2016, Alexander Mendes'
author = 'Alexander Mendes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = json.load(open('../pybossa_discourse/info.json'))['version']
# The short X.Y version.
version = version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pybossa-discoursedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pybossa-discourse.tex', 'pybossa-discourse Documentation',
'Alexander Mendes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pybossa-discourse', 'pybossa-discourse Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pybossa-discourse', 'pybossa-discourse Documentation',
author, 'pybossa-discourse', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | alexandermendes/pybossa-discourse | docs/conf.py | Python | bsd-3-clause | 9,364 |
# -*- coding: utf-8 -*-
from django.conf.urls import include, url
from django.contrib import admin
from DingDingCater import settings
import django.views.static as ds
from blog.views import page_index, page_article, page_category, ajax_praiseCnt, upload_image
urlpatterns = [
# static file router
    # after a picture is inserted in kindeditor, kindeditor requests it through this url.
url(r"^uploads/(?P<path>.*)$", ds.serve, {"document_root": settings.MEDIA_ROOT, }),
# kindeditor uploads pic to this url
    # get the named group 'dirNameUnderUpload' and pass it to upload_image()
    # here 'admin/uploads/' must be the same as the 'uploadJson' value defined in /static/js/kindeditor-4.1.11-en/config.js
url(r'admin/uploads/(?P<dirNameUnderUpload>[^/]+)', upload_image),
url(r'^category/$', page_category),
url(r'^uploadPraise/$', ajax_praiseCnt),
url(r'^article/$', page_article, name='article'), # ——> {% url 'article' %}
url(r'^$', page_index),
url(r'^admin/', include(admin.site.urls)),
]
| hippowon/DingDingCater | DingDingCater/urls.py | Python | gpl-3.0 | 1,016 |
#!/usr/bin/python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
"""
Get word, character, and Asian character counts
1. Get a word count as a dictionary:
wc = get_wordcount(text)
words = wc['words'] # etc.
2. Get a word count as an object
wc = get_wordcount_obj(text)
words = wc.words # etc.
properties counted:
* characters
* chars_no_spaces
* asian_chars
* non_asian_words
* words
Sourced from:
http://ginstrom.com/scribbles/2008/05/17/counting-words-etc-in-an-html-file-with-python/
http://ginstrom.com/scribbles/2007/10/06/counting-words-characters-and-asian-characters-with-python/
"""
__version__ = 0.1
__author__ = "Ryan Ginstrom"
IDEOGRAPHIC_SPACE = 0x3000
def is_asian(char):
"""Is the character Asian?"""
# 0x3000 is ideographic space (i.e. double-byte space)
# Anything over is an Asian character
return ord(char) > IDEOGRAPHIC_SPACE
def filter_jchars(c):
"""Filters Asian characters to spaces"""
if is_asian(c):
return ' '
return c
def nonj_len(word):
u"""Returns number of non-Asian words in {word}
- 日本語AアジアンB -> 2
- hello -> 1
@param word: A word, possibly containing Asian characters
"""
# Here are the steps:
# 本spam日eggs
# -> [' ', 's', 'p', 'a', 'm', ' ', 'e', 'g', 'g', 's']
# -> ' spam eggs'
# -> ['spam', 'eggs']
# The length of which is 2!
chars = [filter_jchars(c) for c in word]
return len(u''.join(chars).split())
def get_wordcount(text):
"""Get the word/character count for text
@param text: The text of the segment
"""
characters = len(text)
chars_no_spaces = sum([not x.isspace() for x in text])
asian_chars = sum([is_asian(x) for x in text])
non_asian_words = nonj_len(text)
words = non_asian_words + asian_chars
return dict(characters=characters,
chars_no_spaces=chars_no_spaces,
asian_chars=asian_chars,
non_asian_words=non_asian_words,
words=words)
def dict2obj(dictionary):
"""Transform a dictionary into an object"""
class Obj(object):
def __init__(self, dictionary):
self.__dict__.update(dictionary)
return Obj(dictionary)
def get_wordcount_obj(text):
"""Get the wordcount as an object rather than a dictionary"""
return dict2obj(get_wordcount(text))
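if __name__ == '__main__':
    # Small self-check added for illustration (not part of the original
    # module): the sample string holds two English words plus three Asian
    # characters, so the word count is 2 + 3 = 5.
    wc = get_wordcount(u'Hello world 日本語')
    assert wc['non_asian_words'] == 2
    assert wc['asian_chars'] == 3
    assert wc['words'] == 5
    assert get_wordcount_obj(u'Hello world 日本語').words == 5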
| ashang/calibre | src/calibre/utils/wordcount.py | Python | gpl-3.0 | 2,390 |
from electrum_rby.plugins import BasePlugin, hook
from electrum_rby_gui.qt.util import WaitingDialog, EnterButton
from electrum_rby.util import print_msg, print_error
from electrum_rby.i18n import _
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import traceback
import zlib
import json
from io import BytesIO
import sys
import platform
try:
import amodem.audio
import amodem.main
import amodem.config
print_error('Audio MODEM is available.')
amodem.log.addHandler(amodem.logging.StreamHandler(sys.stderr))
amodem.log.setLevel(amodem.logging.INFO)
except ImportError:
amodem = None
print_error('Audio MODEM is not found.')
class Plugin(BasePlugin):
def __init__(self, config, name):
BasePlugin.__init__(self, config, name)
if self.is_available():
self.modem_config = amodem.config.slowest()
self.library_name = {
'Linux': 'libportaudio.so'
}[platform.system()]
def is_available(self):
return amodem is not None
def requires_settings(self):
return True
def settings_widget(self, window):
return EnterButton(_('Settings'), self.settings_dialog)
def settings_dialog(self):
d = QDialog()
d.setWindowTitle("Settings")
layout = QGridLayout(d)
layout.addWidget(QLabel(_('Bit rate [kbps]: ')), 0, 0)
bitrates = list(sorted(amodem.config.bitrates.keys()))
def _index_changed(index):
bitrate = bitrates[index]
self.modem_config = amodem.config.bitrates[bitrate]
combo = QComboBox()
combo.addItems(map(str, bitrates))
combo.currentIndexChanged.connect(_index_changed)
layout.addWidget(combo, 0, 1)
ok_button = QPushButton(_("OK"))
ok_button.clicked.connect(d.accept)
layout.addWidget(ok_button, 1, 1)
return bool(d.exec_())
@hook
def transaction_dialog(self, dialog):
b = QPushButton()
b.setIcon(QIcon(":icons/speaker.png"))
def handler():
blob = json.dumps(dialog.tx.as_dict())
self.sender = self._send(parent=dialog, blob=blob)
self.sender.start()
b.clicked.connect(handler)
dialog.buttons.insert(0, b)
@hook
def scan_text_edit(self, parent):
def handler():
self.receiver = self._recv(parent=parent)
self.receiver.start()
parent.addButton(':icons/microphone.png', handler, _("Read from microphone"))
@hook
def show_text_edit(self, parent):
def handler():
blob = str(parent.toPlainText())
self.sender = self._send(parent=parent, blob=blob)
self.sender.start()
parent.addButton(':icons/speaker.png', handler, _("Send to speaker"))
def _audio_interface(self):
interface = amodem.audio.Interface(config=self.modem_config)
return interface.load(self.library_name)
def _send(self, parent, blob):
def sender_thread():
try:
with self._audio_interface() as interface:
src = BytesIO(blob)
dst = interface.player()
amodem.main.send(config=self.modem_config, src=src, dst=dst)
except Exception:
traceback.print_exc()
print_msg('Sending:', repr(blob))
blob = zlib.compress(blob)
kbps = self.modem_config.modem_bps / 1e3
msg = 'Sending to Audio MODEM ({0:.1f} kbps)...'.format(kbps)
return WaitingDialog(parent=parent, message=msg, run_task=sender_thread)
def _recv(self, parent):
def receiver_thread():
try:
with self._audio_interface() as interface:
src = interface.recorder()
dst = BytesIO()
amodem.main.recv(config=self.modem_config, src=src, dst=dst)
return dst.getvalue()
except Exception:
traceback.print_exc()
def on_success(blob):
if blob:
blob = zlib.decompress(blob)
print_msg('Received:', repr(blob))
parent.setText(blob)
kbps = self.modem_config.modem_bps / 1e3
msg = 'Receiving from Audio MODEM ({0:.1f} kbps)...'.format(kbps)
return WaitingDialog(parent=parent, message=msg,
run_task=receiver_thread, on_success=on_success)
| justinvforvendetta/electrum-rby | plugins/audio_modem.py | Python | gpl-3.0 | 4,480 |
from direct.gui.OnscreenImage import OnscreenImage
from panda3d.core import TransparencyAttrib
from direct.gui.DirectGui import DirectButton
from core.game import Game
import sys
class MainMenu:
def __init__(self):
self.background = None
self.newGameButton = None
self.optionsButton = None
self.exitButton = None
def drawMainMenu(self):
x = self.win.getXSize()
y = self.win.getYSize()
self.background = OnscreenImage(image='textures/main_menu.png')
self.background.setSx(x / y)
clickNewGameButton = lambda: self.push(Game())
clickOptionsButton = lambda: self.push('Options')
clickExitButton = lambda: sys.exit()
def setButtonAttributes(button):
button.setSx(.60)
button.setSz(.26)
button.setTransparency(TransparencyAttrib.MAlpha)
maps = loader.loadModel('textures/continue_maps.egg')
geom = (maps.find('**/continue'),
maps.find('**/continue_click'),
maps.find('**/continue_hover'))
self.newGameButton = DirectButton(geom=geom, relief=None,
command=clickNewGameButton)
setButtonAttributes(self.newGameButton)
self.newGameButton.setPos(0, 0, .6)
maps = loader.loadModel('textures/options_maps.egg')
geom = (maps.find('**/options'),
maps.find('**/options_click'),
maps.find('**/options_hover'))
self.optionsButton = DirectButton(geom=geom, relief=None,
command=clickOptionsButton)
setButtonAttributes(self.optionsButton)
self.optionsButton.setPos(0, 0, .36)
maps = loader.loadModel('textures/exit_maps.egg')
geom = (maps.find('**/exit'),
maps.find('**/exit_click'),
maps.find('**/exit_hover'))
self.exitButton = DirectButton(geom=geom, relief=None,
command=clickExitButton)
setButtonAttributes(self.exitButton)
self.exitButton.setPos(0, 0, .12)
self.hasDrawnMainMenu = True
def destroyMainMenu(self):
self.background.destroy()
self.newGameButton.destroy()
self.optionsButton.destroy()
self.exitButton.destroy()
self.hasDrawnMainMenu = False
| mzdravkov/comori | main_menu.py | Python | mit | 2,386 |
# -*- coding: UTF-8 -*-
import datetime
from django.core.mail import EmailMessage
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse_lazy
from django.views.generic import FormView, TemplateView, ListView, DetailView
from .forms import RegForm
from .models import Event, Person, Registration
from django.core.mail import send_mail, BadHeaderError, EmailMultiAlternatives
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import messages
from django.utils import timezone
class RegView(FormView):
form_class = RegForm
template_name = 'hhlregistrations/register.html'
success_url = reverse_lazy('registrations:regok_generic')
def get_context_data(self, **kwargs):
context = super(RegView, self).get_context_data(**kwargs)
context['event'] = get_object_or_404(Event, pk=self.kwargs['event_id'])
context['show_form'] = True
context['registration_closed'] = False
if ( context['event'].close_registrations
and timezone.now() > context['event'].close_registrations): # timezone!
context['registration_closed'] = True
context['show_form'] = False
context['waiting_list'] = False
if ( context['event'].max_registrations > 0
and Registration.objects.filter(state__in=('AC', 'CC')).count() >= context['event'].max_registrations):
context['waiting_list'] = True
context['show_optional'] = False
context['show_join'] = True
if context['event'].hide_join_checkbox:
context['show_join'] = False
context['show_materials'] = False
if ( context['event'].materials_cost
and not context['event'].materials_mandatory):
context['show_materials'] = True
# Hide the whole optional section if we have nothing to show there
if True not in (context['show_join'], context['show_materials']):
context['show_optional'] = False
return context
def form_valid(self, form):
context = self.get_context_data()
data = form.cleaned_data
person, created = Person.objects.get_or_create(email=data['email'],
defaults={
'first_name': data['first_name'],
'last_name': data['last_name'],
}
)
# Just in case someone manages to sneak a double reg through the form
registration, created = Registration.objects.get_or_create(person=person, event=context['event'],
defaults={
'state': 'AC',
'wants_materials': data['wants_materials'],
}
)
if data['join']:
mail = EmailMessage()
mail.from_email = person.formatted_email
# TODO: read this from settings
mail.to = ['[email protected]', person.formatted_email]
mail.subject = u'Jäsenhakemus (ilmoittautumislomakkeelta)'
mail.body = """
Nimi: {lname}, {fname}
Paikkakunta: {city}
Haen jäseneksi, hyväksyn Helsinki Hacklab ry:n säännöt.
""".format(fname=person.first_name, lname=person.last_name, city=data['city']).strip()
            # TODO: Do not ignore; catch the error and tell the user to send the mail themselves
mail.send(True)
return super(RegView, self).form_valid(form)
class RegOKView(TemplateView):
template_name = 'hhlregistrations/register_ok.html'
class ListDetailMixin(object):
def get_context_data(self, **kwargs):
return super(ListDetailMixin, self).get_context_data(**kwargs)
# may need AdminPlus to show this on the admin main page; for now, use URL /admin/reg_sum/
class Summary(ListDetailMixin, ListView, DetailView):
context_object_name = 'reg_sum'
template_name = 'hhlregistrations/summary.html'
queryset = Event.objects.all()
slug_field = 'event_slug'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(Summary, self).get(self, request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
print(self.object)
return self.send_email(request, *args, **kwargs)
def get_object(self, queryset=None):
try:
sel_event = Event.objects.get(uuid=self.kwargs['slug'])
print(self.kwargs['slug'])
except:
sel_event = None
return sel_event
def send_email(self, request, *args, **kwargs):
subject = request.POST.get('subject', '')
message = request.POST.get('message', '')
from_email = request.POST.get('reply_to', '')
extra_cc = [request.POST.get('extra_recipient', '')]
bcc_to = []
participants = self.object.getParticipants()
for r in participants:
bcc_to.append(r.person.email)
msg = EmailMultiAlternatives(subject, message, from_email, [], bcc=bcc_to, cc=extra_cc)
print(bcc_to)
print(msg)
if subject and message and from_email:
try:
msg.send()
except BadHeaderError:
return HttpResponse('Invalid header found.')
messages.add_message(request, messages.INFO, 'Lähetetty viesti: "' + message + ' --- Vastaanottajille: ' + ' '.join(bcc_to) +' '+ ' '.join(extra_cc))
return super(Summary, self).get(self, request, *args, **kwargs)
else:
return HttpResponse('Make sure all fields are entered and valid.')
| hacklab-fi/hhlevents | hhlevents/apps/hhlregistrations/views.py | Python | bsd-3-clause | 5,623 |
from c2corg_api.models.article import Article, ArchiveArticle, ARTICLE_TYPE
from c2corg_api.models.document import DocumentLocale, ArchiveDocumentLocale, \
DOCUMENT_TYPE
from c2corg_api.scripts.migration.documents.document import MigrateDocuments, \
DEFAULT_QUALITY
from c2corg_api.scripts.migration.documents.routes import MigrateRoutes
class MigrateArticles(MigrateDocuments):
def get_name(self):
return 'articles'
def get_model_document(self, locales):
return DocumentLocale if locales else Article
def get_model_archive_document(self, locales):
return ArchiveDocumentLocale if locales else ArchiveArticle
def get_count_query(self):
return (
' select count(*) '
' from app_articles_archives aa join articles t on aa.id = t.id '
' where t.redirects_to is null;'
)
def get_query(self):
return (
' select '
' aa.id, aa.document_archive_id, aa.is_latest_version, '
' aa.is_protected, aa.redirects_to, '
' aa.elevation, aa.categories, aa.activities, aa.article_type '
' from app_articles_archives aa join articles t on aa.id = t.id '
' where t.redirects_to is null '
' order by aa.id, aa.document_archive_id;'
)
def get_count_query_locales(self):
return (
' select count(*) '
' from app_articles_i18n_archives aa '
' join articles t on aa.id = t.id '
' where t.redirects_to is null;'
)
def get_query_locales(self):
return (
' select '
' aa.id, aa.document_i18n_archive_id, aa.is_latest_version, '
' aa.culture, aa.name, aa.description '
' from app_articles_i18n_archives aa '
' join articles t on aa.id = t.id '
' where t.redirects_to is null '
' order by aa.id, aa.culture, aa.document_i18n_archive_id;'
)
def get_document(self, document_in, version):
categories = self.convert_types(
document_in.categories, MigrateArticles.article_categories)
if 'draft' in categories:
default_quality = 'draft'
categories.remove('draft')
else:
default_quality = DEFAULT_QUALITY
return dict(
document_id=document_in.id,
type=ARTICLE_TYPE,
version=version,
quality=default_quality,
categories=categories,
activities=self.convert_types(
document_in.activities, MigrateRoutes.activities),
article_type=self.convert_type(
document_in.article_type, MigrateArticles.article_types)
)
def get_document_locale(self, document_in, version):
description = self.convert_tags(document_in.description)
description, summary = self.extract_summary(description)
return dict(
document_id=document_in.id,
id=document_in.document_i18n_archive_id,
type=DOCUMENT_TYPE,
version=version,
lang=document_in.culture,
title=document_in.name,
description=description,
summary=summary
)
article_types = {
'1': 'collab',
'2': 'personal'
}
article_categories = {
'1': 'mountain_environment',
'2': 'gear',
'11': 'technical',
'4': 'topoguide_supplements',
'7': 'soft_mobility',
'8': 'expeditions',
'3': 'stories',
'9': 'c2c_meetings',
'10': 'tags',
'5': 'site_info',
'6': 'association',
'100': None
}
| c2corg/v6_api | c2corg_api/scripts/migration/documents/articles.py | Python | agpl-3.0 | 3,724 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-01 22:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0050_userprofile_avatar_version'),
('analytics', '0007_remove_interval'),
]
operations = [
migrations.AlterIndexTogether(
name='realmcount',
index_together=set([('property', 'end_time')]),
),
migrations.AlterIndexTogether(
name='streamcount',
index_together=set([('property', 'realm', 'end_time')]),
),
migrations.AlterIndexTogether(
name='usercount',
index_together=set([('property', 'realm', 'end_time')]),
),
]
| Galexrt/zulip | analytics/migrations/0008_add_count_indexes.py | Python | apache-2.0 | 744 |
#!/usr/bin/env python
#
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
if __name__ == '__main__':
ctx.logger.info("I am {}".format(repr(ctx.instance.id)))
properties = {}
# use properties provided by install
properties.update(ctx.node.properties)
# use properties provided by current node
if 'config' not in ctx.instance.runtime_properties:
ctx.instance.runtime_properties['config'] = {}
properties.update(ctx.instance.runtime_properties['config'])
# use properties provided by workflow parameters
properties.update(inputs)
ctx.logger.info("Resulted properties: {}".format(properties))
| cloudify-incubator/cloudify-utilities-plugin | cloudify_scalelist/examples/scripts/tree_update.py | Python | apache-2.0 | 1,283 |
# -*- coding: utf-8 -*-
# -*- mode: python -*-
import time
class Ticker(object):
def __init__(self):
self.lasttick = 0
    def elapsed(self, tick=True) -> float:
now = time.monotonic()
ltick = self.lasttick
if tick:
self.lasttick = now
if ltick > 0:
return now - ltick
else:
return 0
def tick(self) -> None:
self.lasttick = time.monotonic()
def __iter__(self):
while True:
yield self.elapsed()
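if __name__ == '__main__':
    # Usage sketch added for illustration (not part of the original module).
    t = Ticker()
    t.tick()
    time.sleep(0.1)
    print(t.elapsed())  # roughly 0.1 seconds since the last tick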
| waipu/bakawipe | lib/sup/ticker.py | Python | gpl-3.0 | 520 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Nicira, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.etree import ElementTree
from nova import flags
from nova import test
from nova import utils
from nova.virt import firewall
from nova.virt.libvirt import vif
from nova.virt.libvirt import connection
FLAGS = flags.FLAGS
class LibvirtVifTestCase(test.TestCase):
net = {
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'br0',
'bridge_interface': 'eth0',
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8'
}
mapping = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz'
}
instance = {
'uuid': 'instance-uuid'
}
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
self.flags(allow_same_net_traffic=True)
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
t = __import__('Cheetah.Template', globals(), locals(),
['Template'], -1)
self.Template = t.Template
xml_file = open(FLAGS.libvirt_xml_template)
self.xml_template = xml_file.read()
def _create_xml_info(self, vif_type, nics):
return {
'type': 'qemu',
'name': 'fake-name',
'uuid': 'fake-uuid',
'memory_kb': 100 * 1024,
'basepath': 'foobar',
'vcpus': 4,
'rescue': False,
'disk_prefix': '/dev/sda',
'driver_type': 'raw',
'root_device_type': 'disk',
'vif_type': vif_type,
'nics': nics,
'ebs_root': True,
'ephemeral_device': False,
'volumes': [],
'use_virtio_for_bridges': False,
'ephemerals': []}
def _get_instance_xml(self, driver, vif_type):
nic_dict = driver.plug(self.instance, self.net, self.mapping)
xml_info = self._create_xml_info(vif_type, [nic_dict])
xml = str(self.Template(self.xml_template, searchList=[xml_info]))
return xml
def test_bridge_driver(self):
d = vif.LibvirtBridgeDriver()
xml = self._get_instance_xml(d, 'bridge')
doc = ElementTree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, self.net['bridge'])
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
d.unplug(None, self.net, self.mapping)
def test_ovs_ethernet_driver(self):
d = vif.LibvirtOpenVswitchDriver()
xml = self._get_instance_xml(d, 'ethernet')
doc = ElementTree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual(node.get("type"), "ethernet")
dev_name = node.find("target").get("dev")
self.assertTrue(dev_name.startswith("tap"))
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
script = node.find("script").get("path")
self.assertEquals(script, "")
d.unplug(None, self.net, self.mapping)
def test_ovs_virtualport_driver(self):
d = vif.LibvirtOpenVswitchVirtualPortDriver()
xml = self._get_instance_xml(d, 'ovs_virtualport')
doc = ElementTree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, FLAGS.libvirt_ovs_bridge)
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "openvswitch")
iface_id_found = False
for p_elem in vp.findall("parameters"):
iface_id = p_elem.get("interfaceid", None)
if iface_id:
self.assertEqual(iface_id, self.mapping['vif_uuid'])
iface_id_found = True
self.assertTrue(iface_id_found)
d.unplug(None, self.net, self.mapping)
def test_quantum_bridge_ethernet_driver(self):
d = vif.QuantumLinuxBridgeVIFDriver()
xml = self._get_instance_xml(d, 'ethernet')
doc = ElementTree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual(node.get("type"), "ethernet")
dev_name = node.find("target").get("dev")
self.assertTrue(dev_name.startswith("tap"))
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
script = node.find("script").get("path")
self.assertEquals(script, "")
d.unplug(None, self.net, self.mapping)
| anbangr/trusted-nova | nova/tests/test_libvirt_vif.py | Python | apache-2.0 | 6,112 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure_devtools.scenario_tests import RecordingProcessor
class URIIdentityReplacer(RecordingProcessor):
"""Replace the identity in request uri"""
def process_request(self, request):
import re
request.uri = re.sub('/identities/([^/?]+)', '/identities/sanitized', request.uri)
return request
def process_response(self, response):
import re
if 'url' in response:
response['url'] = re.sub('/identities/([^/?]+)', '/identities/sanitized', response['url'])
return response
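if __name__ == "__main__":
    # Illustrative check (added, not part of the shipped helper): the same
    # substitution the processor applies, run against a made-up sample URI.
    import re
    sample = "https://contoso.communication.azure.com/identities/8:acs:abc123/token"
    expected = "https://contoso.communication.azure.com/identities/sanitized/token"
    assert re.sub('/identities/([^/?]+)', '/identities/sanitized', sample) == expected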
| Azure/azure-sdk-for-python | sdk/communication/azure-communication-chat/tests/helper.py | Python | mit | 855 |
#quest by zerghase
import sys
from com.l2scoria import Config
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "42_HelpTheUncle"
WATERS=30828
SOPHYA=30735
TRIDENT=291
MAP_PIECE=7548
MAP=7549
PET_TICKET=7583
MONSTER_EYE_DESTROYER=20068
MONSTER_EYE_GAZER=20266
MAX_COUNT=30
MIN_LEVEL=25
class Quest (JQuest) :
def onEvent(self, event, st):
htmltext=event
if event=="1":
htmltext="30828-01.htm"
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event=="3" and st.getQuestItemsCount(TRIDENT):
htmltext="30828-03.htm"
st.takeItems(TRIDENT,1)
st.set("cond","2")
elif event=="4" and st.getQuestItemsCount(MAP_PIECE)>=MAX_COUNT:
htmltext="30828-05.htm"
st.takeItems(MAP_PIECE,MAX_COUNT)
st.giveItems(MAP,1)
st.set("cond", "4")
elif event=="5" and st.getQuestItemsCount(MAP):
htmltext="30735-06.htm"
st.takeItems(MAP,1)
st.set("cond","5")
elif event=="7":
htmltext="30828-07.htm"
st.giveItems(PET_TICKET,1)
st.unset("cond")
st.setState(COMPLETED)
st.exitQuest(0)
return htmltext
def onTalk(self, npc, player):
htmltext="<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId=npc.getNpcId()
id=st.getState()
if id==CREATED:
if player.getLevel()>=MIN_LEVEL:
htmltext="30828-00.htm"
else:
htmltext="<html><body>This quest can only be taken by characters that have a minimum level of %s. Return when you are more experienced.</body></html>" % MIN_LEVEL
st.exitQuest(1)
elif id==STARTED:
cond=st.getInt("cond")
if npcId==WATERS:
if cond==1:
if not st.getQuestItemsCount(TRIDENT):
htmltext="30828-01a.htm"
else:
htmltext="30828-02.htm"
elif cond==2:
htmltext="30828-03a.htm"
elif cond==3:
htmltext="30828-04.htm"
elif cond==4:
htmltext="30828-05a.htm"
elif cond==5:
htmltext="30828-06.htm"
elif npcId==SOPHYA and id == STARTED:
cond=st.getInt("cond")
if cond==4 and st.getQuestItemsCount(MAP):
htmltext="30735-05.htm"
elif cond==5:
htmltext="30735-06a.htm"
elif id==COMPLETED:
st.exitQuest(0)
htmltext="<html><body>This quest has already been completed.</body></html>"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
npcId = npc.getNpcId()
cond=st.getInt("cond")
if cond==2:
numItems,chance = divmod(100*Config.RATE_QUESTS_REWARD,100)
if st.getRandom(100) < chance :
numItems = numItems +1
pieces=st.getQuestItemsCount(MAP_PIECE)
if pieces + numItems >= MAX_COUNT :
numItems = MAX_COUNT - pieces
if numItems != 0:
st.playSound("ItemSound.quest_middle")
st.set("cond", "3")
else :
st.playSound("ItemSound.quest_itemget")
st.giveItems(MAP_PIECE,int(numItems))
return
QUEST=Quest(42,qn,"Help The Uncle!")
CREATED=State('Start', QUEST)
STARTED=State('Started', QUEST)
COMPLETED=State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(WATERS)
QUEST.addTalkId(WATERS)
QUEST.addTalkId(SOPHYA)
QUEST.addKillId(MONSTER_EYE_DESTROYER)
QUEST.addKillId(MONSTER_EYE_GAZER) | zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/quests/42_HelpTheUncle/__init__.py | Python | gpl-3.0 | 3,707 |
#!/usr/bin/env python
import datetime
import logging
import os
import re
from urllib.parse import urljoin
from utils import utils, inspector, admin
# https://www.si.edu/OIG
archive = 2003
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
#
# The general strategy for this report is a bit complicated. First we go through
# the RSS feed and add reports. This gives us a bunch of reports with definitive
# published dates. Unfortunately, this feed is only updated sporadically.
# Next, we use the main audits page since this also gives us accurate published
# dates for the newer reports that are listed. Lastly, we fall back to the
# archives page, which unfortunately only lists the year the report was
# published.
#
# Since a report can be listed on multiple pages, it is important to use
# consistent report ids across pages. We only grab reports the first time a
# a given report id is seen.
RSS_URL = "https://www.si.edu/Content/OIG/Misc/OIG-RSS.xml"
RECENT_AUDITS_URL = "https://www.si.edu/OIG/Audits"
AUDIT_ARCHIVE_URL = "https://www.si.edu/oig/Archive"
OTHER_REPORTS_URL = "https://www.si.edu/OIG/ReportsToCongress"
RSS_BROKEN_LINKS = {
"http://www.si.edu/Content/OIG/Misc/Peer_Review_09-21-2011.pdf":
"https://www.si.edu/Content/OIG/Misc/Peer_Review_09-21-11.pdf",
"http://www.si.edu/oig/RecoveryAct.htm":
"https://www.si.edu/OIG/Recovery",
"http://www.si.edu/oig/AuditReports/UnderstandingAudits.pdf":
"https://www.si.edu/Content/OIG/Misc/UnderstandingAudits.pdf",
"http://www.si.edu/oig/AuditReports/A-0907-FSA-Oversight.pdf":
"https://www.si.edu/Content/OIG/Audits/2010/A-09-07.pdf",
"http://www.si.edu/oig/ARRA_Reports/M-10--04-1.pdf":
"https://www.si.edu/Content/OIG/Audits/M-10-04-1.pdf",
"http://www.si.edu/oig/AuditReports/SIIG_Testimony_121009.pdf":
"https://www.si.edu/Content/OIG/Testimony/SIIG_Testimony_121009.pdf",
"http://www.si.edu/oig/AuditReports/IBA-0902.pdf":
"https://www.si.edu/Content/OIG/Audits/2009/IBA-09-02.pdf",
"http://www.si.edu/oig/AuditReports/IBA-0808.pdf":
"https://www.si.edu/Content/OIG/Audits/2009/IBA-08-08.pdf",
"http://www.si.edu/oig/AuditReports/A-08-05-FSA-Oversight-Letter.pdf":
"https://www.si.edu/Content/OIG/Audits/2009/A-08-05.pdf",
}
REPORT_PUBLISHED_MAP = {
"FY16_CSA": datetime.datetime(2016, 8, 9),
"A-15-06": datetime.datetime(2015, 12, 10),
"A-14-06": datetime.datetime(2015, 7, 1),
"A-14-04": datetime.datetime(2014, 8, 6),
"A-13-07": datetime.datetime(2013, 7, 26),
"A-13-03": datetime.datetime(2013, 2, 27),
"A-13-01": datetime.datetime(2013, 10, 29),
"A-12-08": datetime.datetime(2013, 6, 3),
"A-11-05": datetime.datetime(2012, 5, 15),
"A-11-07": datetime.datetime(2013, 3, 30),
"M-11-03": datetime.datetime(2011, 7, 1),
"A-09-03-02": datetime.datetime(2009, 9, 14),
"A-09-03-01": datetime.datetime(2009, 9, 3),
"WRW_TravelReview": datetime.datetime(2008, 10, 28),
"A-07-09-1": datetime.datetime(2008, 7, 18),
"SBVCEOAUPFY06-7": datetime.datetime(2008, 7, 7),
"SecretaryAUPFY06-7": datetime.datetime(2008, 7, 3),
"IBA-07-08": datetime.datetime(2008, 3, 31),
"A-07-01": datetime.datetime(2008, 3, 31),
"A-07-05": datetime.datetime(2008, 2, 25),
"A-06-04": datetime.datetime(2007, 1, 16),
"A-06-06R": datetime.datetime(2007, 1, 19),
"A-06-02R": datetime.datetime(2007, 1, 19),
"A-06-08": datetime.datetime(2007, 5, 16),
"SBVCEOAUP": datetime.datetime(2007, 7, 19),
"A-06-07": datetime.datetime(2007, 8, 10),
"A-06-01": datetime.datetime(2007, 8, 28),
"A-06-05": datetime.datetime(2007, 4, 20),
"A-07-06": datetime.datetime(2007, 9, 19),
"A-07-04": datetime.datetime(2007, 9, 28),
"A-05-03": datetime.datetime(2006, 2, 16),
"M-06-01": datetime.datetime(2006, 2, 14),
"IBA-05-05": datetime.datetime(2006, 7, 25),
"A-05-07": datetime.datetime(2006, 8, 21),
"A-05-06R": datetime.datetime(2006, 9, 29),
"A-04-05": datetime.datetime(2005, 1, 6),
"A-04-10": datetime.datetime(2005, 2, 25),
"A-04-11": datetime.datetime(2005, 3, 3),
"A-05-01": datetime.datetime(2005, 9, 29),
"A-05-04": datetime.datetime(2005, 9, 28),
"M-05-01": datetime.datetime(2006, 6, 8),
"M-05-02": datetime.datetime(2005, 9, 15),
"M-05-03": datetime.datetime(2005, 12, 5),
"A-03-07": datetime.datetime(2004, 3, 31),
"A-04-02": datetime.datetime(2004, 6, 16),
"A-04-03": datetime.datetime(2004, 9, 9),
"A-04-06": datetime.datetime(2004, 8, 25),
"A-04-07": datetime.datetime(2004, 7, 22),
"A-02-04": datetime.datetime(2003, 7, 31),
"A-02-06": datetime.datetime(2003, 1, 17),
"A-02-10": datetime.datetime(2003, 4, 16),
"A-03-02": datetime.datetime(2003, 3, 31),
"A-03-04": datetime.datetime(2003, 3, 27),
"A-03-05": datetime.datetime(2003, 12, 18),
"A-03-06": datetime.datetime(2003, 12, 3),
"A-03-08": datetime.datetime(2003, 9, 3),
}
URL_BLACKLIST = [
RECENT_AUDITS_URL,
OTHER_REPORTS_URL,
AUDIT_ARCHIVE_URL,
"https://get.adobe.com/reader/"
]
report_ids_seen = set()
def run(options):
year_range = inspector.year_range(options, archive)
  # Pull the RSS feed
doc = utils.beautifulsoup_from_url(RSS_URL)
results = doc.select("item")
if not results:
raise inspector.NoReportsFoundError("Smithsonian (RSS)")
for result in results:
report = rss_report_from(result, year_range)
if report:
inspector.save_report(report)
  # Pull the recent audit reports.
doc = utils.beautifulsoup_from_url(RECENT_AUDITS_URL)
results = doc.select("div.block > a")
if not results:
raise inspector.NoReportsFoundError("Smithsonian (recent audit reports)")
for result in results:
report = report_from(result, year_range)
if report:
inspector.save_report(report)
# Pull the archive audit reports
doc = utils.beautifulsoup_from_url(AUDIT_ARCHIVE_URL)
results = doc.select("div.block a")
if not results:
raise inspector.NoReportsFoundError("Smithsonian (audit archive)")
for result in results:
report = report_from(result, year_range)
if report:
inspector.save_report(report)
# Pull the other reports
doc = utils.beautifulsoup_from_url(OTHER_REPORTS_URL)
results = doc.select("div.block > a")
if not results:
raise inspector.NoReportsFoundError("Smithsonian (other)")
for result in results:
report = report_from(result, year_range)
if report:
inspector.save_report(report)
def report_type_from_url(report_url):
if 'Audit' in report_url or 'Announcements' in report_url:
return 'audit'
elif 'Semiannual' in report_url:
return 'semiannual_report'
elif 'Testimony' in report_url:
return 'testimony'
elif 'Peer_Review' in report_url:
return 'peer_review'
elif 'PressRelease' in report_url:
return 'press'
else:
return 'other'
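# Illustrative examples of the mapping above (added for clarity; the paths are
# made up but follow the style of the real report URLs):
#   report_type_from_url('.../Content/OIG/Audits/2015/A-15-06.pdf')      -> 'audit'
#   report_type_from_url('.../Content/OIG/SAR/Semiannual_Report.pdf')    -> 'semiannual_report'
#   report_type_from_url('.../Content/OIG/Testimony/SIIG_Testimony.pdf') -> 'testimony'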
def rss_report_from(result, year_range):
report_url = result.find("link").next_sibling.strip()
if report_url.rstrip("/") == 'http://www.si.edu/oig':
# This is the default url the IG uses for announcements of things like
# a website redesign or changes to the RSS feed.
return
if report_url == "http://www.si.edu/oig/OIGStratPlan.pdf":
# This strategic plan is no longer on the website, but it is reproduced in
# multiple semiannual reports, so we skip it here.
return
if report_url in RSS_BROKEN_LINKS:
report_url = RSS_BROKEN_LINKS[report_url]
else:
report_url = report_url.replace("/OIG/SAR/Semiannual_Reports/", "/OIG/SAR/")
report_url = report_url.replace("/oig/Semiannual_Reports/", "/Content/OIG/SAR/")
report_url = report_url.replace("/oig/AuditReports/", "/Content/OIG/Audits/")
report_url = report_url.replace("/oig/ARRA_Reports/", "/Content/OIG/Audits/")
file_type = None
if not report_url.endswith(".pdf"):
file_type = "html"
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
if report_id in report_ids_seen:
return
report_ids_seen.add(report_id)
title = result.find("title").text
report_type = report_type_from_url(report_url)
published_on = None
published_on_text = result.find("pubdate").text
try:
published_on = datetime.datetime.strptime(published_on_text, '%a, %d %b %Y %H:%M:%S %z').date()
except ValueError:
pass
if not published_on:
try:
published_on = datetime.datetime.strptime(published_on_text, '%a, %d %B %Y %H:%M:%S %z').date()
except ValueError:
pass
if not published_on:
admin.log_no_date("smithsonian", report_id, title, report_url)
return
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'smithsonian',
'inspector_url': 'https://www.si.edu/OIG',
'agency': 'smithsonian',
'agency_name': 'Smithsonian Institution',
'type': report_type,
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if file_type:
report['file_type'] = file_type
return report
def report_from(result, year_range):
report_url = urljoin(RECENT_AUDITS_URL, result.get('href'))
if report_url in URL_BLACKLIST:
return None
# Strip extra path adjustments
report_url = report_url.replace("../", "")
summary = None
if not report_url.endswith(".pdf"):
# Some reports link to other page which link to the full report
report_page = utils.beautifulsoup_from_url(report_url)
relative_report_url = report_page.select("div.block a[href]")[0]['href']
report_url = urljoin(report_url, relative_report_url)
# Strip extra path adjustments
report_url = report_url.replace("../", "")
summary = "\n".join(paragraph.text for paragraph in report_page.select("div.grid_12 p"))
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
if report_id in report_ids_seen:
return
report_ids_seen.add(report_id)
title = result.text.strip()
report_type = report_type_from_url(report_url)
if not title:
return None
estimated_date = False
published_on = None
if report_id in REPORT_PUBLISHED_MAP:
published_on = REPORT_PUBLISHED_MAP[report_id]
if not published_on:
try:
published_on_text = "/".join(re.search('(\w+) (\d+), (\d+)', title).groups())
published_on = datetime.datetime.strptime(published_on_text, '%B/%d/%Y')
except AttributeError:
pass
if not published_on:
month_year_match = MONTH_YEAR_RE.search(result.text)
if month_year_match:
date_text = ' '.join(month_year_match.group(0).split())
published_on = datetime.datetime.strptime(date_text, '%B %Y')
estimated_date = True
if not published_on:
admin.log_no_date("smithsonian", report_id, title, report_url)
return
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'smithsonian',
'inspector_url': 'https://www.si.edu/OIG',
'agency': 'smithsonian',
'agency_name': 'Smithsonian Institution',
'type': report_type,
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if summary:
report['summary'] = summary
if estimated_date:
report['estimated_date'] = estimated_date
return report
MONTH_YEAR_RE = re.compile('(?:January|February|March|April|May|June|July|'
'August|September|October|November|December)\s+'
'[0-9]{4}')
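# Example of what MONTH_YEAR_RE is meant to catch (illustrative comment):
# MONTH_YEAR_RE.search('Fire Safety Review, September 2014').group(0) would
# return 'September 2014', which report_from() then parses with '%B %Y' and
# marks as an estimated date.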
utils.run(run) if (__name__ == "__main__") else None
| divergentdave/inspectors-general | inspectors/smithsonian.py | Python | cc0-1.0 | 11,701 |
# -*- encoding: utf-8 -*-
"""
Cross-validation.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.utils.compatibility import * # NOQA
class H2OPartitionIterator(object):
def __init__(self, n):
if abs(n - int(n)) >= 1e-15: raise ValueError("n must be an integer")
self.n = int(n)
self.masks = None
def __iter__(self):
for test_mask in self._test_masks():
yield 1 - test_mask, test_mask
def _test_masks(self):
raise NotImplementedError()
class H2OKFold(H2OPartitionIterator):
def __init__(self, fr, n_folds=3, seed=-1):
H2OPartitionIterator.__init__(self, len(fr))
self.n_folds = n_folds
self.fr = fr
self.seed = seed
self.fold_assignments = None
def __len__(self):
return self.n_folds
def _test_masks(self):
if self.fold_assignments is None:
self._assign_folds()
if self.masks is None: self.masks = [i == self.fold_assignments for i in range(self.n_folds)]
return self.masks
def _assign_folds(self):
if self.fr is None: raise ValueError("No H2OFrame available for computing folds.")
self.fold_assignments = self.fr.kfold_column(self.n_folds, self.seed)
self.fr = None
class H2OStratifiedKFold(H2OPartitionIterator):
def __init__(self, y, n_folds=3, seed=-1):
H2OPartitionIterator.__init__(self, len(y))
self.n_folds = n_folds
self.y = y
self.seed = seed
self.fold_assignments = None
def __len__(self):
return self.n_folds
def _test_masks(self):
if self.fold_assignments is None:
self._assign_folds()
if self.masks is None: self.masks = [i == self.fold_assignments for i in range(self.n_folds)]
return self.masks
def _assign_folds(self):
if self.y is None: raise ValueError("No y available for computing stratified folds.")
self.fold_assignments = self.y.stratified_kfold_column(self.n_folds, self.seed)
self.y = None
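# Usage sketch (illustrative comment, not part of the original module). It
# assumes a running H2O cluster, an H2OFrame `fr` and a response column `y`;
# the row-slicing syntax is an assumption to verify against the h2o-py docs.
#
# for train_mask, test_mask in H2OKFold(fr, n_folds=5, seed=42):
#     train, valid = fr[train_mask, :], fr[test_mask, :]
#     # fit on `train`, evaluate on `valid`
#
# H2OStratifiedKFold(y, n_folds=5) works the same way, but balances the
# class distribution of `y` across folds.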
| jangorecki/h2o-3 | h2o-py/h2o/cross_validation.py | Python | apache-2.0 | 2,183 |
#!/usr/bin/env python
import os,sys,time
#get network RX,TX
def rxtxFunction():
fileName='/proc/net/dev'
try:
fd=open(fileName)
except IOError, e:
print e
exit(1)
content=((fd.read()).replace(':',': ')).split('\n')
rxTotal=0
txTotal=0
for item in content:
if 'eth' in item:
array=str(item).split()
rxTotal=rxTotal+int(array[1])
txTotal=txTotal+int(array[9])
return rxTotal,txTotal
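# Layout note (added comment): after replacing ':' with ': ' above, a
# /proc/net/dev line splits into the interface name followed by 16 counters;
# array[1] is total received bytes and array[9] is total transmitted bytes.
# Example line:
#   eth0: 1234567 890 0 0 0 0 0 0 7654321 456 0 0 0 0 0 0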
## main function
def main():
rxTotal0=0
txTotal0=0
sleepTime=1
if len(sys.argv)<2:
sleepTime=1
elif (sys.argv[1]).isdigit()==False:
print 'argv[1]:please give a number,unit: second'
exit(1)
else:
sleepTime=int(sys.argv[1])
if sleepTime<=0:
            print 'argv[1]: must be larger than 0'
exit(1)
while True:
rxTotal0,txTotal0=rxtxFunction()
time.sleep(sleepTime)
rxTotal1,txTotal1=rxtxFunction()
print time.strftime('%H:%M:%S',time.localtime(time.time()))+ \
' arg(RX)=' + str("%.4f" % ((rxTotal1-rxTotal0)*1.0/1024/1024/sleepTime)) + ' MB/s' + \
' '*5 + 'arg(TX)=' + str("%.4f" % ((txTotal1-txTotal0)*1.0/1024/1024/sleepTime)) + ' MB/s'
if __name__=='__main__':
main()
| linwanggm/script | net.py | Python | gpl-2.0 | 1,294 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
import frappe
test_records = frappe.get_test_records('Monthly Distribution')
class TestMonthlyDistribution(unittest.TestCase):
pass
| mhbu50/erpnext | erpnext/accounts/doctype/monthly_distribution/test_monthly_distribution.py | Python | gpl-3.0 | 258 |
"""Script to read ARIS utility billing data via the ARIS API and
transform the data into a format that can be processed by the Fairbanks
North Star Borough (FNSB) benchmarking script. The output of this script
is a CSV file containing a record for each fuel purchase for a building,
placed in the data subdirectory with the name 'aris_records.csv'. A second
output is an Excel file with general information for each building present
in the ARIS database; this file is found at 'data/Buildings.xlsx'. It is
used by the main benchmarking script. Inputs for this script come from
the settings.py file in this directory.
"""
import time
from datetime import timedelta
import pandas as pd
import numpy as np
import requests
import settings # settings file for this application
print('\nScript started: {}'.format(time.ctime()))
# URLs and Passwords
base_url = settings.ARIS_API_URL
my_username = settings.ARIS_USERNAME
my_password = settings.ARIS_PASSWORD
# Get the full list of buildings
my_params = {'username': my_username,
'password':my_password}
building_list_url = base_url + '/GetBuildingList'
# Errors occur here. Try three times.
for i in range(3):
try:
results = requests.post(building_list_url, params=my_params).json()
break
except:
if i==2:
raise
else:
# wait 5 seconds before trying again
time.sleep(5)
df_bldgs = pd.DataFrame(results)
# Add a Degree-Day Site column by looking up via zip code
df_zip_to_dd = pd.read_excel('data/Zip_to_DD_Site.xlsx', skiprows=4)
df_zip_to_dd['zip_code'] = df_zip_to_dd.zip_code.astype(str)
zip_to_dd = dict(zip(df_zip_to_dd.zip_code, df_zip_to_dd.dd_site))
df_bldgs['dd_site'] = df_bldgs.BuildingZip.map(zip_to_dd)
# So need to find the zip codes that don't map to a Degree-Day site
# 'dd_site != dd_site' is a hack for finding NaN values.
df_no_map = df_bldgs.query('(BuildingZip > "") and (dd_site != dd_site)')
print('''The following zip codes do not have an entry in the
"data/Zip_to_DD_Site.xlsx" file, so no degree-days are available:
{}'''.format(df_no_map.BuildingZip.unique()))
# Rename columns and write out the Excel file describing the buildings.
col_map = [
('BuildingId', 'site_id'),
('BuildingName', 'site_name'),
('BuildingOwnerName', 'site_category'),
('BuildingStreet', 'address'),
('BuildingCity', 'city'),
('BuildingUsageName', 'primary_func'),
('YearBuilt', 'year_built'),
('SquareFeet', 'sq_ft'),
('dd_site', 'dd_site')
]
old_cols, new_cols = zip(*col_map)
df_bldgs2 = df_bldgs[list(old_cols)].copy()
df_bldgs2.columns = new_cols
df_bldgs2['onsite_gen'] = '' # not used
df_bldgs2.to_excel('data/Buildings.xlsx', startrow=3, index=False)
# ----------- Now work on the detailed records, processing to a form
# ----------- usable by the FNSB script.
building_detail_url = base_url + '/GetBuildingEnergyDetail'
my_data = {'username': my_username,
'password':my_password,
'buildingId': None}
dfd = None
next_prn = time.time()
for bldg_id in df_bldgs2.site_id.unique():
my_data['buildingId'] = bldg_id
# Errors occur here. Try three times.
for i in range(3):
try:
detail = requests.post(building_detail_url, data=my_data).json()
break
except:
if i==2:
raise
else:
# wait 5 seconds before trying again
time.sleep(5)
if len(detail['BuildingEnergyDetailList']):
df_detail = pd.DataFrame(detail['BuildingEnergyDetailList'])
# Get rid of unneeded columns
df_detail.drop(columns=['EnergyTypeId', 'EnergyUnitId', 'UsageYear'], inplace=True)
if dfd is not None:
dfd = dfd.append(df_detail, ignore_index=True)
else:
dfd = df_detail.copy()
if time.time() > next_prn:
print('{:,} records fetched'.format(len(dfd)))
next_prn += 10.0 # wait 10 seconds before printing
# Change columns to correct data types
dfd = dfd.apply(pd.to_numeric, errors='ignore')
dfd[['UsageDate', 'MeterReadDate']] = dfd[['UsageDate', 'MeterReadDate']].apply(pd.to_datetime)
# For the usage end date, 'Thru', use the MeterReadDate if available, otherwise
# use the middle of the UsageDate month.
def thru_date(row):
if pd.isnull(row.MeterReadDate):
return row.UsageDate.replace(day=15)
else:
return row.MeterReadDate
dfd['Thru'] = dfd.apply(thru_date, axis=1)
# Change 'Demand - Electric' to 'Electric'
dfd.loc[dfd.EnergyTypeName == 'Demand - Electric', 'EnergyTypeName'] = 'Electric'
# There are a number of records where the EnergyQuantity is 0 or NaN,
# which probably occurs because someone doesn't have the bill for that
# month or there was no fuel fill-up in that month. We will eliminate
# those records, because they distort the period over which fuel usage
# occurred for sporadically bought fuels like oil and wood. For
# monthly-billed fuels, we will later in the code make sure that the
# From - Thru billing period only covers 1 month.
# Start by converting 0s to NaN to make future tests easier.
dfd.loc[dfd.EnergyQuantity == 0.0, 'EnergyQuantity'] = np.NaN
dfd.loc[dfd.DemandUse == 0.0, 'DemandUse'] = np.NaN
# Also found that there were a bunch of -1.0 values for DemandUse that
# are very likely not valid.
dfd.loc[dfd.DemandUse == -1.0, 'DemandUse'] = np.NaN
# Now filter down to just the records where we have a number for
# either EnergyQuantity or DemandUse.
mask = ~(dfd.EnergyQuantity.isnull() & dfd.DemandUse.isnull())
dfd = dfd[mask].copy()
# Fill out the From date by using the Thru date from the prior bill
# for the building and for the particular fuel type
df_final = None
for gp, recs in dfd.groupby(['BuildingId', 'EnergyTypeName']):
recs = recs.sort_values(['Thru']).copy()
# Start date comes from prior record
recs['From'] = recs.Thru.shift(1)
recs['Item Description'] = 'Energy'
if df_final is None:
df_final = recs.copy()
else:
df_final = df_final.append(recs, ignore_index=True)
# For the services that are normally billed on a monthly basis, fill out
# any missing From dates (e.g. the first bill for a building) with a value
# 30 days prior to Thru. Also, restrict the Thru - From difference to 25 to 35 days.
# If it is outside that range, set to Thru - 30 days.
# Fuel types that are normally billed on a monthly basis
mo_fuels = ['Electric', 'Natural Gas', 'Steam District Ht', 'Hot Wtr District Ht']
mask_mo = df_final.EnergyTypeName.isin(mo_fuels)
# Find records of that type that have NaT for From date and
# set to 30 days prior to Thru
df_final.loc[mask_mo & df_final.From.isnull(), 'From'] = df_final.Thru - timedelta(days=30)
# Now find any records where Thru - From is outside 25 - 35 window and fix those.
# Perhaps they are buildings where there are two separate electric bills.
bill_len = df_final.Thru - df_final.From
mask2 = mask_mo & ((bill_len < timedelta(days=25)) | (bill_len > timedelta(days=35)))
df_final.loc[mask2, 'From'] = df_final.Thru - timedelta(days=30)
# Now work on the fuel types that are not billed monthly. Some of these records
# have NaT for the From date because they were the first record for the building
# and a particular fuel type. We will ultimately delete these. In this step
# find sporadically billed records that have a billing length of greater than 450
# days and put a NaT in for From, so that deleting all From==NaT records will catch
# them as well. A billing period more than 450 days probably indicates that a fuel
# fill was missed making the record invalid.
mask_sporadic = ~mask_mo
mask3 = mask_sporadic & (bill_len > timedelta(days=450))
df_final.loc[mask3, 'From'] = pd.NaT
# Now eliminate all the sporadically billed records that have a From
# with a NaT
mask_elim = (mask_sporadic & df_final.From.isnull())
df_final = df_final[~mask_elim].copy()
# Now add the Electric Demand Charge records. The From-Thru dates on these
# have already been set. The demand quantity and cost
# appear in separate, dedicated columns, but we will move them to the 'EnergyQuantity'
# and 'DollarCost' columns.
df_demand = df_final.query('DemandUse > 0 and EnergyTypeName=="Electric"').copy()
df_demand['EnergyQuantity'] = df_demand.DemandUse
df_demand['DollarCost'] = df_demand.DemandCost
df_demand['EnergyUnitTypeName'] = 'kW'
df_demand['Item Description'] = 'Demand Charge'
# add these to the final DataFrame
df_final = df_final.append(df_demand, ignore_index=True)
# Eliminate the columns that are not needed
df_final.drop(columns=['DemandCost', 'DemandUse', 'MeterReadDate', 'UsageDate'], inplace=True)
col_map = {
'BuildingId': 'Site ID',
'EnergyTypeName': 'Service Name',
'EnergyUnitTypeName': 'Units',
'EnergyQuantity': 'Usage',
'DollarCost': 'Cost',
}
df_final.rename(col_map, axis=1, inplace=True)
# These fields are used in the report summarizing vendors.
df_final['Account Number'] = ''
df_final['Vendor Name'] = ''
# Save the final results as a CSV file and a pickle
df_final.to_pickle('data/aris_records.pkl')
df_final.to_csv('data/aris_records.csv', index=False)
print('Script completed: {}'.format(time.ctime()))
| alanmitchell/fnsb-benchmark | read_aris.py | Python | mit | 9,257 |
#!/usr/bin/python
#my_subnet = input("Enter Subnet address:")
my_ip = "192.168.1.100"
my_subnet = "255.255.255.0"
ip = my_ip.split(".")
print ip
# Check the validity of the ip address
while True:
#my_ip = input("Enter a ip address:")
if int(ip[0]) <= 223 and (int(ip[1]) != 169 or int(ip[2]) != 254) and (int(ip[1]) <= 254 and int(ip[2]) <= 254 and int(ip[3]) <= 254):
print "You entered a valid ip"
break
else:
print "You entered a wrong ip"
continue
bin_ip = ""
for a in ip:
print bin(int(a)).split("b")[1].zfill(8)
bin_ip += bin(int(a)).split("b")[1].zfill(8)
print bin_ip
print type(bin_ip)
| hiteshagrawal/python | networking/subnet-cal.py | Python | gpl-2.0 | 620 |
from gwpy.timeseries import TimeSeriesDict | gwpy/gwpy.github.io | docs/v0.5/examples/timeseries/blrms-1.py | Python | gpl-3.0 | 42 |
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with multiple processes."""
from __future__ import absolute_import, division, with_statement
import errno
import functools
import os
import signal
import subprocess
import sys
import time
from binascii import hexlify
from tornado import ioloop
from tornado.iostream import PipeIOStream
from tornado.log import gen_log
from tornado import stack_context
try:
import multiprocessing # Python 2.6+
except ImportError:
multiprocessing = None
def cpu_count():
"""Returns the number of processors on this machine."""
if multiprocessing is not None:
try:
return multiprocessing.cpu_count()
except NotImplementedError:
pass
try:
return os.sysconf("SC_NPROCESSORS_CONF")
except ValueError:
pass
gen_log.error("Could not detect number of processors; assuming 1")
return 1
def _reseed_random():
if 'random' not in sys.modules:
return
import random
# If os.urandom is available, this method does the same thing as
# random.seed (at least as of python 2.6). If os.urandom is not
# available, we mix in the pid in addition to a timestamp.
try:
seed = long(hexlify(os.urandom(16)), 16)
except NotImplementedError:
seed = int(time.time() * 1000) ^ os.getpid()
random.seed(seed)
_task_id = None
def fork_processes(num_processes, max_restarts=100):
"""Starts multiple worker processes.
If ``num_processes`` is None or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If ``num_processes`` is given and > 0, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the debug=True option to `tornado.web.Application`).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``fork_processes``.
In each child process, ``fork_processes`` returns its *task id*, a
number between 0 and ``num_processes``. Processes that exit
abnormally (due to a signal or non-zero exit status) are restarted
with the same id (up to ``max_restarts`` times). In the parent
process, ``fork_processes`` returns None if all child processes
have exited normally, but will otherwise only exit by throwing an
exception.
"""
global _task_id
assert _task_id is None
if num_processes is None or num_processes <= 0:
num_processes = cpu_count()
if ioloop.IOLoop.initialized():
raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
"has already been initialized. You cannot call "
"IOLoop.instance() before calling start_processes()")
gen_log.info("Starting %d processes", num_processes)
children = {}
def start_child(i):
pid = os.fork()
if pid == 0:
# child process
_reseed_random()
global _task_id
_task_id = i
return i
else:
children[pid] = i
return None
for i in range(num_processes):
id = start_child(i)
if id is not None:
return id
num_restarts = 0
while children:
try:
pid, status = os.wait()
except OSError, e:
if e.errno == errno.EINTR:
continue
raise
if pid not in children:
continue
id = children.pop(pid)
if os.WIFSIGNALED(status):
gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
id, pid, os.WTERMSIG(status))
elif os.WEXITSTATUS(status) != 0:
gen_log.warning("child %d (pid %d) exited with status %d, restarting",
id, pid, os.WEXITSTATUS(status))
else:
gen_log.info("child %d (pid %d) exited normally", id, pid)
continue
num_restarts += 1
if num_restarts > max_restarts:
raise RuntimeError("Too many child restarts, giving up")
new_id = start_child(id)
if new_id is not None:
return new_id
# All child processes exited cleanly, so exit the master process
# instead of just returning to right after the call to
# fork_processes (which will probably just start up another IOLoop
# unless the caller checks the return value).
sys.exit(0)
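# Typical use (sketch, assuming an existing `app` Application and tornado's
# documented multi-process pattern): bind sockets first, fork, then let each
# child run its own server and IOLoop, e.g.
#   sockets = tornado.netutil.bind_sockets(8888)
#   tornado.process.fork_processes(0)   # 0 -> one child per detected CPU
#   server = tornado.httpserver.HTTPServer(app)
#   server.add_sockets(sockets)
#   tornado.ioloop.IOLoop.instance().start()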
def task_id():
"""Returns the current task id, if any.
Returns None if this process was not created by `fork_processes`.
"""
global _task_id
return _task_id
class Subprocess(object):
"""Wraps ``subprocess.Popen`` with IOStream support.
The constructor is the same as ``subprocess.Popen`` with the following
additions:
* ``stdin``, ``stdout``, and ``stderr`` may have the value
`tornado.process.Subprocess.STREAM`, which will make the corresponding
attribute of the resulting Subprocess a `PipeIOStream`.
* A new keyword argument ``io_loop`` may be used to pass in an IOLoop.
"""
STREAM = object()
_initialized = False
_waiting = {}
def __init__(self, *args, **kwargs):
self.io_loop = kwargs.pop('io_loop', None)
to_close = []
if kwargs.get('stdin') is Subprocess.STREAM:
in_r, in_w = os.pipe()
kwargs['stdin'] = in_r
to_close.append(in_r)
self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
if kwargs.get('stdout') is Subprocess.STREAM:
out_r, out_w = os.pipe()
kwargs['stdout'] = out_w
to_close.append(out_w)
self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
if kwargs.get('stderr') is Subprocess.STREAM:
err_r, err_w = os.pipe()
kwargs['stderr'] = err_w
to_close.append(err_w)
            self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
self.proc = subprocess.Popen(*args, **kwargs)
for fd in to_close:
os.close(fd)
for attr in ['stdin', 'stdout', 'stderr', 'pid']:
if not hasattr(self, attr): # don't clobber streams set above
setattr(self, attr, getattr(self.proc, attr))
self._exit_callback = None
self.returncode = None
def set_exit_callback(self, callback):
"""Runs ``callback`` when this process exits.
The callback takes one argument, the return code of the process.
This method uses a ``SIGCHILD`` handler, which is a global setting
and may conflict if you have other libraries trying to handle the
same signal. If you are using more than one ``IOLoop`` it may
be necessary to call `Subprocess.initialize` first to designate
one ``IOLoop`` to run the signal handlers.
In many cases a close callback on the stdout or stderr streams
can be used as an alternative to an exit callback if the
signal handler is causing a problem.
"""
self._exit_callback = stack_context.wrap(callback)
Subprocess.initialize(self.io_loop)
Subprocess._waiting[self.pid] = self
Subprocess._try_cleanup_process(self.pid)
@classmethod
def initialize(cls, io_loop=None):
"""Initializes the ``SIGCHILD`` handler.
The signal handler is run on an IOLoop to avoid locking issues.
Note that the IOLoop used for signal handling need not be the
same one used by individual Subprocess objects (as long as the
IOLoops are each running in separate threads).
"""
if cls._initialized:
return
if io_loop is None:
io_loop = ioloop.IOLoop.instance()
cls._old_sigchld = signal.signal(
signal.SIGCHLD,
lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
cls._initialized = True
@classmethod
def uninitialize(cls):
"""Removes the ``SIGCHILD`` handler."""
if not cls._initialized:
return
signal.signal(signal.SIGCHLD, cls._old_sigchld)
cls._initialized = False
@classmethod
def _cleanup(cls):
for pid in cls._waiting.keys():
cls._try_cleanup_process(pid)
@classmethod
def _try_cleanup_process(cls, pid):
try:
ret_pid, status = os.waitpid(pid, os.WNOHANG)
except OSError, e:
if e.args[0] == errno.ECHILD:
return
if ret_pid == 0:
return
assert ret_pid == pid
subproc = cls._waiting.pop(pid)
subproc.io_loop.add_callback_from_signal(
subproc._set_returncode, status)
def _set_returncode(self, status):
if os.WIFSIGNALED(status):
self.returncode = -os.WTERMSIG(status)
else:
assert os.WIFEXITED(status)
self.returncode = os.WEXITSTATUS(status)
if self._exit_callback:
callback = self._exit_callback
self._exit_callback = None
callback(self.returncode)
| norus/procstat-json | tornado/process.py | Python | gpl-3.0 | 9,841 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Authors: Adrien Peiffer
# Copyright (c) 2015 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class res_company(orm.Model):
_inherit = 'res.company'
_columns = {
'accrual_taxes': fields.boolean(string='Accrual On Taxes')
}
| open-synergy/account-closing | account_invoice_accrual/company.py | Python | agpl-3.0 | 1,152 |
import string
from numpy import log
import startup, subject_info
from models import CompleteEnrollmentData, SubjectInfo
"""
Find classes that are most similar to the given keywords
"""
def keyword_similarity(user_keywords):
stop_list = set('for a of also by the and to in on as at from with but how about such eg ie'.split()) # remove common words
sim_ratings = {}
class_titles = {}
subject_info = SubjectInfo.objects.filter(subject__in=startup.mit_classes).values("subject", "keywords", "title")
for row in subject_info:
cl = row["subject"]
title = row["title"]
class_keywords = [x for x in row["keywords"] if x not in stop_list]
if len(class_keywords) == 0 or len(title) == 0:
continue
class_titles[cl] = title
# Get each word in title of class
title_list = title.split()
title_keywords = [x for x in title_list if x not in stop_list]
keywords_list = user_keywords.split()
sim_rating = startup.model.n_similarity(class_keywords, keywords_list)
try:
sim_rating += 2*startup.model.n_similarity(title_keywords, keywords_list)
# sim_rating += log(total)
except:
print cl
sim_ratings[cl] = sim_rating
# sort and return
sorted_sim_ratings = sorted(sim_ratings, key=sim_ratings.get, reverse=True)
recs = []
for c in sorted_sim_ratings[:20]:
try:
title = class_titles[c]
except:
title = ""
recs.append((c, title))
return recs
| lchu94/classfinder | recommender/keyword_similarity.py | Python | mit | 1,585 |
"""
Premium Question
"""
__author__ = 'Daniel'
"""
This is the interface that allows for creating nested lists.
You should not implement it, or speculate about its implementation
"""
class NestedInteger(object):
def __init__(self, value=None):
"""
If value is not specified, initializes an empty list.
Otherwise initializes a single integer equal to value.
"""
def isInteger(self):
"""
@return True if this NestedInteger holds a single integer, rather than a nested list.
:rtype bool
"""
def add(self, elem):
"""
Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
:rtype void
"""
def setInteger(self, value):
"""
Set this NestedInteger to hold a single integer equal to value.
:rtype void
"""
def getInteger(self):
"""
@return the single integer that this NestedInteger holds, if it holds a single integer
Return None if this NestedInteger holds a nested list
:rtype int
"""
def getList(self):
"""
@return the nested list that this NestedInteger holds, if it holds a nested list
Return None if this NestedInteger holds a single integer
:rtype List[NestedInteger]
"""
class Solution(object):
def __init__(self):
self.sum = 0
def depthSumInverse(self, nestedList):
"""
NestedInteger is a union type
:type nestedList: List[NestedInteger]
:rtype: int
"""
inv_depth = self.height(nestedList)
self.inverseDepthSum(nestedList, inv_depth)
return self.sum
def height(self, nl):
nl_lst = filter(lambda x: not x.isInteger(), nl)
if not nl_lst:
return 1
if nl_lst:
return 1 + max(
map(lambda x: self.height(x.getList()), nl_lst)
)
def inverseDepthSum(self, nl, inv_depth):
nl_lst = filter(lambda x: not x.isInteger(), nl)
ni_list = filter(lambda x: x.isInteger(), nl)
if nl_lst:
map(lambda x: self.inverseDepthSum(x.getList(), inv_depth - 1), nl_lst)
if ni_list:
self.sum += sum(map(lambda x: x.getInteger() * inv_depth, ni_list))
class SolutionError(object):
def __init__(self):
self.sum = 0
def depthSumInverse(self, nestedList):
"""
NestedInteger is a union type
:type nestedList: List[NestedInteger]
:rtype: int
"""
self.dfs(nestedList)
return self.sum
def dfs(self, nl):
"""
        This dfs uses height: the number of edges down to the leaves.
        The problem, however, weights integers top-down from the maximum depth;
        accumulating bottom-up by height as done here is wrong.
"""
height = 1
nl_lst = filter(lambda x: not x.isInteger(), nl)
ni_list = filter(lambda x: x.isInteger(), nl)
if nl_lst:
height = 1 + max(
map(lambda x: self.dfs(x.getList()), nl_lst)
)
if ni_list:
self.sum += sum(map(lambda x: x.getInteger() * height, ni_list))
return height
| algorhythms/LeetCode | 364 Nested List Weight Sum II.py | Python | mit | 3,219 |
"""
WSGI config for soundphy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "soundphy.settings")
application = get_wsgi_application()
| Soundphy/soundphy | soundphy/wsgi.py | Python | bsd-3-clause | 393 |
# /////////////////////////////////////////////////////////////////////
#
# decode.py : decode DOM/FAWS/SerialID... to readable physical units
#
#
#
#
#
#
#
# ////////////////////////////////////////////////////////////////////
import binascii
from ctypes import create_string_buffer
from math import log10
__author__ = "Yuan Yu"
__version__ = "0.0.1"
__email__ = "[email protected]"
# module ID dictionary, expressed in hex to match the spec
mod_id_dict = {0x00: 'Unknown',
0x01: 'GBIC',
0x02: 'Module soldered to MB',
0x03: 'SFP/SFP+/SFP28',
0x04: '300_Pin_XBI',
0x05: 'XENPAK',
0x06: 'XFP',
0x07: 'XFF',
0x08: 'XFP-E',
0x09: 'XPAK',
0x0A: 'X2',
0x0B: 'DWDM-SFP/SFP+',
0x0C: 'QSFP',
0x0D: 'QSFP+',
0x0E: 'CXP',
               0x0F: 'Shielded Mini Multilane HD 4x',
               0x10: 'Shielded Mini Multilane HD 8x',
0x11: 'QSFP28',
0x12: 'CXP2',
0x13: 'CDFP',
               0x14: 'Shielded Mini Multilane HD 4x Fanout Cable',
               0x15: 'Shielded Mini Multilane HD 8x Fanout Cable',
0x16: 'CDFP',
0x17: 'microQSFP',
# next values are CFP types. Note that their spec
# (CFP MSA Management Interface Specification
# ver 2.4 r06b page 67)
# values overlap with the values for i2c type devices. OOM has
# chosen to add 256 (0x100) to the values to make them unique
0x10E: 'CFP',
0x110: '168_PIN_5X7',
0x111: 'CFP2',
0x112: 'CFP4',
0x113: '168_PIN_4X5',
0x114: 'CFP2_ACO',
}
def get_voltage(x): # return in V
if len(x) != 2:
print "wrong voltage format"
return
temp = ord(x[0])*256 + ord(x[1])
result = float(temp*0.1/1000)
return result
def get_temperature(x): # return in 'C
if len(x) != 2:
print "wrong temperature format"
return
temp = ord(x[0])*256 + ord(x[1])
if temp > 0x7FFF: # if the sign bit is set
temp -= 65536 # take two's complement
result = float(temp/256.0)
return result
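# e.g. (per the arithmetic above) get_temperature('\x7f\xff') -> approx. 127.996 'C
# and get_temperature('\x80\x00') -> -128.0 'C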
# note: get_voltage and get_power are actually identical
# implemented twice so as not to confuse the maintainer :-)
def get_power(x): # return in mW
if len(x) != 2:
print "wrong power format"
return
temp = ord(x[0])*256 + ord(x[1])
result = float(temp*0.1/1000)
return result
def mwtodbm(x):
if x < .001:
return -30 # by convention, -30dbm is the lowest legal value
return 10 * log10(x)
def get_power_dbm(x): # return power in dbm (10*log10(power-in-mw))
return mwtodbm(get_power(x))
def get_current(x): # return in mA
if len(x) != 2:
print "wrong bias format"
return
temp = ord(x[0])*256 + ord(x[1])
result = float(temp/500.0)
return result
def get_signed_current(x): # return in mA
if len(x) != 2:
print "wrong bias format"
return
temp = ord(x[0])*256 + ord(x[1])
if temp > 0x7FFF: # if the sign bit is set
temp -= 65536 # take two's complement
result = float(temp/10.0)
return result
def get_string(x): # copy the cbuffer into a string
result = ''
for c in x:
result += c
return result
def set_string(old, new): # trivial, but needed for oom_set_keyvalue()
return new
def mod_id(x): # return Module ID
return mod_id_dict.get(x, 'Reserved')
def get_bytes(x):
retval = b''
for i in x:
retval += i
return retval
def get_int(x):
result = 0
if len(x) > 4:
print "too many bytes to decode into 32 bit int"
return
for i in x:
result = (result * 256) + ord(i)
return result
def get_intX10(x):
return get_int(x)*10
# return 2 bits
def get2_bits(x, n):
temp = ord(x[0])
temp = temp >> n
temp %= 4
return(temp)
def get2_bit6(x):
return(get2_bits(x, 6))
def get2_bit4(x):
return(get2_bits(x, 4))
def get2_bit2(x):
return(get2_bits(x, 2))
def get2_bit0(x):
return(get2_bits(x, 0))
def get3_bit6(x): # get bits 6, 5, 4
temp = ord(x[0])
temp = temp >> 4
temp %= 8
return(temp)
def get3_bit2(x): # get bits 2, 1, 0
temp = ord(x[0])
temp %= 8
return(temp)
# from 'x', extract 'numbits', starting at 'offset' and going DOWN
# high order is bit 7, low order is bit 0
# so, get_bits(0b00110000, 5, 2) will return 0b11, ie 3
def get_bits(x, offset, numbits):
if (len(x) > 2) or (offset > 15) or (offset < 0) or \
(numbits > 16) or (numbits < 1) or \
((offset - numbits) < -1):
print 'get_bits bad parameters - len(x): %d, offset: %d, numbits: %d'\
% (len(x), offset, numbits)
return
temp = ord(x[0])
if len(x) == 2:
temp *= 256
temp += ord(x[1])
temp = temp >> ((offset + 1) - numbits)
temp %= 2**numbits
return temp
def get_bitrate(x): # returns nominal bit rate IN MB/s
rate = ord(x[0])
# take care here...
# for rates <= 25.4Gb, both SFP and QSFP+ use one byte, in units of 100Mb
# for rates >25.4Gb, both use 0xFF to indicate 'look elsewhere'
# SFP uses byte 66 (vs byte 12), hence offset is 54 bytes
# QSFP uses byte 222 (vs byte 140), hence offset is 82 bytes
# both specify rate in units of 250Mb for extended byte
if (rate == 255):
if (len(x) == 55): # SFP
rate = ord(x[54]) * 250
elif (len(x) == 83): # QSFP+
rate = ord(x[82]) * 250
else:
print "can't decode bit rate"
return
else:
rate = rate * 100
return rate
def get_brmax(x): # returns max bit rate IN MB/s
if (len(x) < 56):
print "can't decode max bit rate"
return
rate = ord(x[0])
# this is tricky... If byte 12 is 0xFF, then the bit rate is in
# byte 66 (in units of 250 MBd), and the limit range of bit rates is
# byte 67, 'in units of +/- 1%'
if rate == 255: # special case, need to use byte 66, 67!
rate = ord(x[54]) # byte 66 is rate in this case
rate_max = rate * (250 + (2.5 * (ord(x[55]))))
# if byte 12 is not 0xFF, then the upper bit rate is in byte 66,
# 'specified in units of 1% above the nominal bit rate'
# remember the rate here is byte 12, raw, it must be multiplied
# by 100. Be careful changing this formula!
else:
rate_max = rate * (100 + ord(x[54]))
return rate_max
def get_brmin(x): # returns minimum bit rate IN MB/s
if (len(x) < 56):
print "can't decode min bit rate"
return
rate = ord(x[0])
# this is tricky... If byte 12 is 0xFF, then the bit rate is in
# byte 66 (in units of 250 MBd), and the limit range of bit rates is
# byte 67, 'in units of +/- 1%'
if rate == 255: # special case, need to use byte 66, 67!
rate = ord(x[54]) # byte 66 is rate in this case
rate_min = rate * (250 - (2.5 * (ord(x[55]))))
# if byte 12 is not 0xFF, then the upper bit rate is in byte 66,
# 'specified in units of 1% above the nominal bit rate'
# remember the rate here is byte 12, raw, it must be multiplied
# by 100. Be careful changing this formula!
else:
rate_min = rate * (100 - ord(x[55]))
return rate_min
def get_length_km(x): # returns supported link length in meters
return ord(x[0]) * 1000
def get_length_100m(x): # returns supported link length in meters
return ord(x[0]) * 100
def get_length_10m(x): # returns supported link length in meters
return ord(x[0]) * 10
def get_length_2m(x): # returns supported link length in meters
return ord(x[0]) * 2
def get_length_omcu(x): # SFP: length in meters, optical OR COPPER
if (len(x) < 11):
print "can't decode OM4/CU max cable length"
return
valid = ord(x[0]) # get byte 8
valid %= 16 # strip bits above 3
valid /= 4 # lose bits below 2
if valid == 0: # if bits 2 and 3 are 0, then optical
return ord(x[10]) * 10 # Optical, stored value is in 10s of meters
return ord(x[1]) # Copper, stored value is in meters
def get_length_omcu2(x): # QSFP+: length in meters, optical OR COPPER
if (len(x) < 2):
print "can't decode OM4/CU max cable length"
return
txtech = ord(x[1])/16 # Transmitter Technology, byte 147, bits 7-4
if txtech == 0: # 850 nm VCSEL
return ord(x[0]) * 2 # OM4, stored value is in units of 2 meters
return ord(x[0]) # Copper, stored value is in meters
def get_wavelength(x): # SFP: requires byte 8 and byte 60, 61
if (len(x) < 54):
print "can't decode wavelength"
return
valid = ord(x[0]) # get byte 8
valid %= 16 # strip bits above 3
valid /= 4 # lose bits below 2
wavelen = 0
if valid == 0: # if bits 2 and 3 are 0, then calculate wavelength
wavelen = ord(x[52])*256 + ord(x[53])
return wavelen
def get_cablespec(x): # requires byte 8 and byte 60, 61
if (len(x) < 54):
print "can't decode cable spec"
return
valid = ord(x[0]) # get byte 8
valid %= 16 # strip bits above 3
valid /= 4 # lose bits below 2
result = x[52:54]
if valid == 0: # optical, cable spec doesn't apply
result = b'\x00\x00'
return result
def get_wavelength2(x): # QSFP: requires byte 147, 186, 187
if (len(x) < 41):
print "can't decode wavelength"
return
txtech = ord(x[1])/16 # Transmitter Technology, byte 147, bits 7-4
if txtech >= 10: # copper technology
return(0)
wavelen = ord(x[39])*256 + ord(x[40])
wavelen = wavelen * 0.05 # value is 20ths of a nanometer!
return wavelen
def get_wave_tol(x): # 2 bytes, in 200ths of a nm, return value in nm
if (len(x) < 2):
print "can't decode wavelength tolerance"
return
wave_tol = ord(x[0])*256 + ord(x[1])
wave_tol = wave_tol * 0.005 # value is 200ths of a nm
return wave_tol
def get_CU_2_5(x): # requires byte 147, 186
if (len(x) < 40):
print "can't decode copper attenuation"
return
txtech = ord(x[1])/16 # Transmitter Technology, byte 147, bits 7-4
if txtech >= 10: # copper technology
return(ord(x[39]))
return 0
def get_CU_5_0(x): # requires byte 147, 187
if (len(x) < 41):
print "can't decode copper attenuation"
return
txtech = ord(x[1])/16 # Transmitter Technology, byte 147, bits 7-4
if txtech >= 10: # copper technology
return(ord(x[40]))
return 0
def get_freq(x): # Extract frequency (CFP)
if (len(x) < 4):
print "can't decode frequency"
return
# low order bits of first two words are freq in THz
freq = ord(x[0]) * 256
freq += ord(x[1])
subfreq = ord(x[2]) * 256
subfreq += ord(x[3])
subfreq *= .00005 # specified by the spec, ie .05 GHz = .00005 THz
freq += subfreq
return freq
def hexstr(x):
result = ''
for i in x:
result += hex(ord(i))
result += ' '
return result
# CFP/MDIO likes to use only the low byte of each word. This function
# squeezes out the zeros in the upper bytes.
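# e.g. collapse_cfp('\x00A\x00B') gives a buffer holding 'AB', and
# expand_cfp('AB') gives back '\x00A\x00B'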
def collapse_cfp(data):
if len(data) < 2:
return ''
newdata = create_string_buffer(len(data)/2)
i = 0
for c in data:
if (i % 2) == 1:
newdata[i/2] = c
i += 1
return newdata
# This routine should exactly undo collapse_cfp
# (except, expand_cfp(collapse_cfp(string)) will delete the last byte
# if string is of odd length)
def expand_cfp(data):
newdata = create_string_buffer(len(data)*2)
i = 0
for c in data:
newdata[i*2] = '\0'
newdata[i*2+1] = c
i += 1
return newdata
# Note that set_int returns a 'C string' suitable for oom_set_memory_sff
def set_int(current, new):
retlen = len(current)
retval = create_string_buffer(retlen)
temp = new
for i in range(retlen):
retval[(retlen - 1) - i] = chr(temp % 256)
temp /= 256
return retval
# insert the low order 'numbits' from 'new' into 'current',
# starting at 'offset'. This is the reverse
# of get_bits(x, offset, numbits)
# high order is bit 7, low order is bit 0
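# e.g. set_bits('\x00', 0b11, 5, 2) returns a one-byte buffer holding 0x30
# (0b00110000), undoing get_bits(0b00110000, 5, 2) == 0b11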
def set_bits(current, new, offset, numbits):
if (len(current) != 1) or (offset > 15) or (offset < 0) or \
(numbits > 16) or (numbits < 1) or \
((offset - numbits) < -1) or (new > 0xFFFF):
print 'set_bits bad parameters'
return
tempcurrent = ord(current[0])
# Set the target bits in tempcurrent to all 1s
    mask = 0xFF >> (8 - numbits)
mask = mask << ((offset + 1) - numbits)
tempcurrent = tempcurrent | mask
# align the new bits, and mask the non-target bits
tempnew = new << ((offset + 1) - numbits)
mask = ~mask & 0xFF
tempnew = tempnew | (mask)
# mash them together
newval = tempnew & tempcurrent
# package the result for oom_set_memory_sff
retval = create_string_buffer(1)
retval[0] = chr(newval)
return retval
# turn a temperature (floating point python) into a 2 byte module
# temperature. [Reverses the calculations of get_temperature()]
def set_temperature(current, new):
if len(current) != 2:
print "wrong temperature format"
return
retval = create_string_buffer(2)
temp = new * 256.0
if temp < 0:
temp += 65536
retval[0] = chr(int(temp / 256))
retval[1] = chr(int(temp % 256))
return retval
| ocpnetworking-wip/oom | oom/decode.py | Python | mit | 13,945 |
from __future__ import print_function
from classifier import get_baseline, get_power, get_ksqi, get_pursqi
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from scipy.stats import entropy
from datetime import datetime
from copy import deepcopy
from utils import *
from parameters import *
import numpy as np
import matplotlib.pyplot as plt
import wfdb
import peakutils
import csv
import os
import json
STD_MULTIPLIER = 1
MIN_DISTANCE_DIFF = 5
# Assuming a max physiologically possible HR of 300
MIN_PEAK_DIST = 60. / 300 * 250
# Assuming a min physiologically possible HR of 30
MAX_PEAK_DIST = 60. / 30 * 250
DEBUG = False
def dprint(*args):
if DEBUG:
for arg in args:
print(arg)
print("")
def is_noisy(
channel_subsig,
checks_to_use,
baseline_threshold=0.75,
power_threshold=0.9,
ksqi_threshold=4,
pursqi_threshold=5
):
checks = []
dprint(get_baseline(channel_subsig), get_power(channel_subsig), get_ksqi(channel_subsig))
# True if passes check
baseline_check = get_baseline(channel_subsig) > baseline_threshold
power_check = get_power(channel_subsig) > power_threshold
ksqi_check = get_ksqi(channel_subsig) > ksqi_threshold
# pursqi_check = get_pursqi(channel_subsig) > pursqi_threshold
# checks = [baseline_check, power_check, ksqi_check, pursqi_check]
# TODO: maybe high pass filter instead of using baseline check as a check
if 'baseline' in checks_to_use:
checks.append(baseline_check)
if 'power' in checks_to_use:
checks.append(power_check)
if 'ksqi' in checks_to_use:
checks.append(ksqi_check)
return not all(checks)
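# Example with the default start_ratio of 1/3: for neighbouring annotations
# a=1000, b=1200, c=1500 the beat window runs from 1200 - (1/3)*200 = approx. 1133
# to 1200 + (2/3)*300 = 1400 samples.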
def get_adjusted_ann_indices(annotation, ann_index, start_ratio=1/3.):
a = annotation[ann_index-1]
b = annotation[ann_index]
c = annotation[ann_index+1]
end_ratio = 1-start_ratio
ann_start_index = b - start_ratio*(b-a)
ann_end_index = b + end_ratio*(c-b)
return ann_start_index, ann_end_index
##
# Returns self_beats, a list of:
# annotation index
# beat_sig
# for regular beats detected in own patient's signal
##
def get_self_beats(
channel_sig,
annotation,
sample_name,
checks_to_use=['baseline', 'power', 'ksqi'],
num_self_beats=20,
window_increment=10,
fs=250.):
self_beats = []
# Get self beats in first 2 min
for start_time in range(0, 120-window_increment+1, window_increment):
end_time = start_time + window_increment
start_index = int(start_time * fs)
end_index = int(end_time * fs)
channel_subsig = channel_sig[start_index:end_index]
# print(start_index, end_index,)
if not is_noisy(channel_subsig, checks_to_use):
for ann_index in range(1, len(annotation)-1):
# TODO: update to have the start and end index be smoothed over past values
ann_start_index, ann_end_index = get_adjusted_ann_indices(annotation, ann_index)
# If beat annotation in clean (not noisy) data range
if ann_start_index > start_index and ann_end_index < end_index:
beat_sig = channel_sig[int(ann_start_index):int(ann_end_index)]
peaks = peakutils.indexes(beat_sig, thres=0.75*max(beat_sig), min_dist=MIN_PEAK_DIST)
# if DEBUG:
# plt.figure()
# plt.plot(peaks, [beat_sig[index] for index in peaks], 'ro')
# plt.plot(beat_sig)
# plt.show()
if len(peaks) < 2:
self_beats.append((annotation[ann_index], beat_sig))
if len(self_beats) >= num_self_beats:
break
dprint("Found", len(self_beats), "self beats.")
if DEBUG:
plt.figure()
for i, beat in enumerate(self_beats):
plt.subplot(5, 4, i+1)
plt.plot(beat[1])
plt.show()
return self_beats
def get_best_self_beats(channel_sig, full_annotation, sample_name):
self_beats = get_self_beats(channel_sig, full_annotation, sample_name)
if len(self_beats) == 0:
self_beats = get_self_beats(channel_sig, full_annotation, sample_name, ['power', 'ksqi'])
if len(self_beats) == 0:
self_beats = get_self_beats(channel_sig, full_annotation, sample_name, ['power'])
if len(self_beats) == 0:
dprint("No self beats found for", sample_name)
return self_beats
def normalize_sig(sig):
return (sig - np.mean(sig)) / np.std(sig)
##
# Returns, for each self beat, its DTW distances to every other self beat in the bank
##
def get_baseline_distances(self_beats, radius=250):
# if DEBUG:
# plt.figure()
# for i, beat in enumerate(self_beats):
# plt.subplot(5, 4, i+1)
# plt.plot(beat[1])
# plt.show()
# Pairwise compare with every other self beat
all_distances = []
for i in range(len(self_beats)):
distances = []
for j in range(len(self_beats)):
if i != j:
i_beat = self_beats[i][1]
j_beat = self_beats[j][1]
distance, path = fastdtw(normalize_sig(i_beat), normalize_sig(j_beat), radius=radius, dist=euclidean)
distances.append(distance)
all_distances.append(distances)
return all_distances
def get_kl_dist(distances):
return [ val if val > 0 else 0.000001 for val in np.histogram(distances, bins=2000)[0] ]
def get_baseline_metrics(metric, baseline_distances):
top_level_distances = []
if metric == 'kl':
flat_distances = [ item for sublist in baseline_distances for item in sublist ]
flat_hist = get_kl_dist(flat_distances)
for sublist in baseline_distances:
sublist_hist = get_kl_dist(sublist)
kl_distance = entropy(sublist_hist, flat_hist)
top_level_distances.append(kl_distance)
elif metric == 'min':
top_level_distances = [ min(sublist) for sublist in baseline_distances ]
elif metric == 'mean':
top_level_distances = [ np.mean(sublist) for sublist in baseline_distances ]
else:
raise Exception("Unrecognized metric: ", metric)
metric_info = [ np.mean(top_level_distances), np.std(top_level_distances) ]
if metric == 'kl':
metric_info.append(deepcopy(baseline_distances))
return metric_info
def get_dtw_distances(beat_sig, self_beats, radius=250):
distances = []
beat_sig_normalized = normalize_sig(beat_sig)
# figure_num = 1
for self_beat in self_beats:
self_beat_normalized = normalize_sig(self_beat[1])
try:
distance, path = fastdtw(beat_sig_normalized, self_beat_normalized, radius=radius, dist=euclidean)
distances.append(distance)
# plt.subplot(5, 4, figure_num)
# plt.title(str(int(distance)))
# plt.plot(self_beat_normalized, 'b-')
# plt.plot(beat_sig_normalized, 'r-')
# plt.axis('off')
# figure_num += 1
except Exception as e:
print(e)
# plt.show()
return distances
##
# Determine whether a beat is a ventricular beat, using the stdev-based threshold
# metric: string indicating metric ('kl', 'min', 'mean')
# metric info: list of relevant metric info
# if 'kl': [ mean, std, baseline_distances ]
# else: [ mean, std ]
##
def is_ventricular_beat_stdev(beat_sig, self_beats, metric, metric_info, threshold):
plt.figure(figsize=[12, 8])
plt.title(str(metric_info[0]) + " " + str(metric_info[1]))
beat_distances = get_dtw_distances(beat_sig, self_beats)
if len(beat_distances) == 0:
# TODO: maybe return false because probably contains inf/nan which is invalid data
return True
if metric == 'kl':
baseline_distances = metric_info[2]
flat_distances = [ item for sublist in baseline_distances for item in sublist ]
flat_hist = get_kl_dist(flat_distances)
beat_hist = get_kl_dist(beat_distances)
metric_distance = entropy(beat_hist, flat_hist)
elif metric == "min":
metric_distance = min(beat_distances)
elif metric == 'mean':
metric_distance = np.mean(beat_distances)
else:
raise Exception("Unrecognized metric type: ", metric)
dprint("distance: ", metric_distance, metric_distance > threshold)
if metric_distance > threshold:
return True
return False
##
# beats is a list of tuples containing:
#    annotation index of the beat QRS
#    sig of beat
##
def get_ventricular_beats(beats, self_beats, metric, metric_info):
ventricular_beats = []
nonventricular_beats = []
mean = metric_info[0]
std = metric_info[1]
# TODO: optimize hyperparameter STD_MULTIPLIER and MIN_DISTANCE_DIFF
threshold = max(mean + std * STD_MULTIPLIER, mean + MIN_DISTANCE_DIFF)
dprint("mean: ", metric_info[0], "std: ", metric_info[1], "threshold: ", threshold)
for beat in beats:
beat_sig = beat[1]
if is_ventricular_beat_stdev(beat_sig, self_beats, metric, metric_info, threshold):
ventricular_beats.append(beat)
else:
nonventricular_beats.append(beat)
return ventricular_beats, nonventricular_beats
##
# Returns beats (list of tuples):
#    annotation index of the beat QRS
#    sig of beat
##
def get_alarm_beats(channel_sig, annotation):
beats = []
for ann_index in range(1, len(annotation)-1):
# Assumes a beat starts start_ratio (default 1/3) before the annotation
# and ends end_ratio (default 2/3) after annotation
# TODO: update this to update dynamically based on past values
start_index, end_index = get_adjusted_ann_indices(annotation, ann_index)
indices = (start_index, end_index)
beat_sig = channel_sig[int(indices[0]):int(indices[1])]
beat = (annotation[ann_index], beat_sig)
if len(beat_sig) > MIN_PEAK_DIST and len(beat_sig) < MAX_PEAK_DIST:
beats.append(beat)
if DEBUG:
plt.figure()
for i, beat in enumerate(beats):
plt.subplot(5, 4, i+1)
plt.plot(beat[1])
plt.show()
return beats
##
# Plot histogram of all pairwise distances between self beats
##
def plot_metrics(baseline_distances, metric, metric_info):
flat_distances = [ item for sublist in baseline_distances for item in sublist ]
mean = metric_info[0]
std = metric_info[1]
multipliers = [0.5, 1, 2, 3, 4, 5]
# Plot all flat distances with mean + std + various multipliers
plt.figure()
plt.hist(flat_distances, edgecolor='black')
plt.axvline(mean, color='r')
for multiplier in multipliers:
plt.axvline(x=mean + std*multiplier, color='g')
plt.show()
# Plot individual distance distributions against flat distances
plt.figure(figsize=[12, 8])
for index, distances in enumerate(baseline_distances):
plt.subplot(5, 4, index+1)
plt.hist(flat_distances, color='blue', edgecolor='black')
plt.hist(distances, color='red', edgecolor='black')
if metric == 'min':
plt.axvline(x=min(distances), color='r')
elif metric == 'mean':
plt.axvline(x=np.mean(distances), color='r')
plt.show()
def plot_self_beat_comparison(self_beats):
for i in range(len(self_beats)):
plt.figure(figsize=[12, 8])
figure_num = 1
for j in range(len(self_beats)):
if i != j:
i_beat = self_beats[i][1]
j_beat = self_beats[j][1]
plt.subplot(5, 4, figure_num)
plt.plot(normalize_sig(i_beat), 'b-')
plt.plot(normalize_sig(j_beat), 'r-')
plt.axis('off')
figure_num += 1
plt.show()
def filter_out_nan(beats):
filtered = []
for beat in beats:
beat_sig = beat[1]
if not np.isnan(np.sum(beat_sig)):
filtered.append(beat)
return filtered
def ventricular_beat_annotations_dtw(
channel_sig,
ann_path,
sample_name,
metric,
start_time,
end_time,
ann_type,
force=False,
file_prefix=output_path_std_distances,
ann_fs=250.):
baseline_dist_filename = file_prefix + sample_name + ".json"
dprint("Finding alarm beats...")
annotation = get_annotation(ann_path + sample_name, ann_type, ann_fs, start_time, end_time).annsamp
alarm_beats = get_alarm_beats(channel_sig, annotation)
dprint("Finding self beats...")
# Full annotation except for when the alarm signal starts (usually last 10 seconds)
full_annotation = get_annotation(ann_path + sample_name, ann_type, ann_fs, 0, start_time).annsamp
self_beats = get_best_self_beats(channel_sig, full_annotation, sample_name)
if os.path.isfile(baseline_dist_filename) and not force:
dprint("Loading baseline distances from file...")
with open(baseline_dist_filename, 'r') as f:
baseline_distances = json.load(f)
else:
dprint("Calculating baseline distances...")
baseline_distances = get_baseline_distances(self_beats)
dprint("Writing baseline distances to file...")
with open(baseline_dist_filename, 'w') as f:
json.dump(baseline_distances, f)
try:
dprint("Calculating baseline metrics...")
metric_info = get_baseline_metrics(metric, baseline_distances)
except Exception as e:
print("sample_name: {}".format(sample_name))
print(e)
return [], []
# plot_metrics(baseline_distances, metric, metric_info)
# plot_self_beat_comparison(self_beats)
dprint("Classifying alarm beats...")
ventricular_beats, nonventricular_beats = get_ventricular_beats(alarm_beats, self_beats, metric, metric_info)
vtach_beats = filter_out_nan(ventricular_beats)
# Only find distances if ventricular beats were found
if len(vtach_beats) > 1:
ventricular_distances = get_baseline_distances(vtach_beats)
ventricular_mean, ventricular_std = get_baseline_metrics('min', ventricular_distances)
# If ventricular beats don't look very similar, mark as noise instead
if ventricular_mean > 20 and ventricular_std > 15 and ventricular_mean > ventricular_std:
vtach_beats = []
ventricular_beat_anns = [ beat[0] for beat in vtach_beats ]
nonventricular_beat_anns = [ beat[0] for beat in nonventricular_beats ]
return ventricular_beat_anns, nonventricular_beat_anns
def write_vtach_beats_files(
data_path,
ann_path,
output_path,
ecg_ann_type,
start_time,
end_time,
metric):
for filename in os.listdir(data_path):
if filename.endswith(HEADER_EXTENSION):
sample_name = filename.rstrip(HEADER_EXTENSION)
if sample_name[0] != 'v':
continue
sig, fields = wfdb.srdsamp(data_path + sample_name)
if "II" not in fields['signame']:
print("Lead II not found for sample: {}".format(sample_name))
continue
output_filename = output_path + sample_name + "_1peak_" + metric + ".csv"
if os.path.isfile(output_filename):
continue
channel_index = fields['signame'].index("II")
ann_type = ecg_ann_type + str(channel_index)
start = datetime.now()
with open(output_filename, "w") as f:
channel_sig = sig[:,channel_index]
vtach, nonvtach = ventricular_beat_annotations_dtw(channel_sig, ann_path, sample_name, metric, start_time, end_time, ann_type)
writer = csv.writer(f)
writer.writerow(['ann_index', 'is_true_beat'])
for beat in vtach:
writer.writerow([beat, 1])
for beat in nonvtach:
writer.writerow([beat, 0])
print("sample_name: {}".format(sample_name), end=" ")
print(" elapsed: {}".format(datetime.now() - start))
def run_one_sample():
# sample_name = "v100s" # false alarm
# sample_name = "v141l" # noisy at beginning
# sample_name = "v159l" # quite clean
# sample_name = "v206s" # high baseline
# sample_name = "v143l"
# sample_name = "v696s"
sample_name = "v837l"
metric = "min"
channel_index = 0
ann_fs = 250.
ann_type = 'gqrs' + str(channel_index)
sig, fields = wfdb.srdsamp(data_path + sample_name)
channel_sig = sig[:,channel_index]
vtach_beats, nonvtach_beats = ventricular_beat_annotations_dtw(channel_sig, ann_path, sample_name, metric, start_time, end_time, ann_type)
plt.figure(figsize=[8,5])
plt.plot(channel_sig[int(start_time*250.):int(end_time*250.)],'b-')
plt.plot([ int(index-250.*start_time) for index in nonvtach_beats ], [channel_sig[int(index)] for index in nonvtach_beats], 'bo', markersize=8)
plt.plot([ int(index-250.*start_time) for index in vtach_beats ], [ channel_sig[int(index)] for index in vtach_beats ], 'ro', markersize=8)
plt.show()
if __name__ == '__main__':
start_time = 290
end_time = 300
metric = 'min'
write_vtach_beats_files(data_path, ann_path, output_path_std, ecg_ann_type, start_time, end_time, metric)
# sig, fields = wfdb.rdsamp(data_path + sample_name)
# channel_sig = sig[:,channel_index]
# annotation = wfdb.rdann(ann_path + sample_name, ann_type, sampfrom=start*ann_fs, sampto=end*ann_fs).annsamp
# print(annotation)
# beats = get_beats(channel_sig, annotation)
# for beat in beats:
# indices = beat[0]
# beat_sig = beat[1]
# time_vector = np.linspace(indices[0], indices[1], len(beat_sig))
# whole_sig = channel_sig[250*start:250*end]
# sig_time_vector = np.linspace(250*start, 250*end, len(whole_sig))
# annotation_y = [ channel_sig[ann_t] for ann_t in annotation ]
# plt.figure()
# plt.plot(sig_time_vector, whole_sig, 'b')
# plt.plot(time_vector, beat_sig, 'r')
# plt.plot(annotation, annotation_y, 'go')
# plt.show()
# print("")
# print(annotation[0] / float(250.))
| MIT-LCP/false-alarm-reduction | pyfar/ventricular_beat_stdev.py | Python | mit | 18,573 |
from abaqus import *
from abaqusConstants import *
from regionToolset import Region
def model_create( mdb, model ):
mdb.Model( model.name)
class AModel(object):
    def __init__( self, amodel ):
        self.amodel = amodel
print 'CYLINDER MODULE'
backwardCompatibility.setValues(includeDeprecated=True, reportDeprecated=False)
mdb.saveAs(pathName='C:/Temp/abaqus/cylinder.cae')
#RESETING THE CURRENT VIEWPORT
myView = session.viewports[ session.currentViewportName ]
myView.setValues( displayedObject=None )
#CREATING A NEW MODEL
myMod = mdb.Model(name=MODELNAME)
#DELETING THE DEFAULT MODEL
#del mdb.models['Model-1']
#CREATING A NEW PART
partCyl = myMod.Part( name='Cylinder',
dimensionality=THREE_D,
type=DEFORMABLE_BODY )
#CREATING AN ISOTROPIC MATERIAL
myMat = myMod.Material( name='aluminum' )
elasticProp = ( E, NU )
myMat.Elastic( table=( elasticProp , ) )
#CREATING THE PROPERTY (isotropic shell)
shellSection = myMod.HomogeneousShellSection( name='AluminumPlate',
material='aluminum',
thickness=T )
#CREATING THE SKETCH which will be used to create the shell geometry
s1 = myMod.ConstrainedSketch( name='SketchCylinder',
sheetSize=max( [2.1*R, 1.1*H] ) )
#axis of revolution
s1.ConstructionLine( point1=(0,-H), point2=(0,H) )
#line to be revoluted
s1.Line( point1=(R,-H/2.), point2=(R,H/2.) )
#CREATING A LOCAL COORDINATE SYSTEM TO USE IN THE BOUNDARY CONDITIONS
csysCyl = partCyl.DatumCsysByThreePoints( name='CSYSCylinder',
coordSysType=CYLINDRICAL,
origin=(0,0,0),
point1=(1,0,0),
point2=(1,0,-1) )
#CREATING THE CYLINDER SHELL GEOMETRY
myCyl = partCyl.BaseShellRevolve( sketch=s1,
angle=360.0,
flipRevolveDirection=OFF )
#PROPERTY - assigning the property to the corresponding faces
partCyl.SectionAssignment(
region=Region( faces=partCyl.faces.findAt(((-R,0,0),)) ),
sectionName='AluminumPlate' )
#DEFINING THE MESH SEEDS ALONG ALL EDGES
partCyl.PartitionEdgeByParam( edges=partCyl.edges.findAt( ((R,0,0),) ),
parameter=PLpoint )
partCyl.seedEdgeBySize(edges= partCyl.edges.findAt( ((R,-H/2,0),) ),
size=ELSIZE,
deviationFactor=0.1,
constraint=FINER)
partCyl.seedEdgeBySize(edges= partCyl.edges.findAt( ((R, H/2,0),) ),
size=ELSIZE,
deviationFactor=0.1,
constraint=FINER)
partCyl.seedEdgeBySize(edges= partCyl.edges.findAt( ((R,-H/4,0),) ),
size=ELSIZE,
deviationFactor=0.1,
constraint=FINER)
partCyl.seedEdgeBySize(edges= partCyl.edges.findAt( ((R, H/4,0),) ),
size=ELSIZE,
deviationFactor=0.1,
constraint=FINER)
#ASSEMBLIES adding the cylinder to assembly
instCyl = myMod.rootAssembly.Instance( name='InstanceCylinder',
part=partCyl,
dependent=ON)
#BOUNDARY CONDITIONS
localCSYS = instCyl.datums[1]
#bot boundary conditions
botEdgeArray = instCyl.edges.findAt( ( (-R,-H/2,0 ), ) )
myMod.DisplacementBC( name='BotBC',
createStepName='Initial',
region = Region( edges=botEdgeArray ),
u1=UNSET,
u2=SET,
u3=SET,
ur1=SET,
ur2=UNSET,
ur3=UNSET,
amplitude = UNSET,
distributionType = UNIFORM,
fieldName = '',
localCsys = localCSYS,
#buckleCase=BUCKLING_MODES
) #NOT_APPLICABLE
#top boundary conditions
topEdgeArray = instCyl.edges.findAt( ( (-R, H/2,0 ), ) )
myMod.DisplacementBC( name='TopBC',
createStepName='Initial',
region = Region( edges=topEdgeArray ),
u1=UNSET,
u2=SET,
u3=UNSET,
ur1=SET,
ur2=UNSET,
ur3=UNSET,
amplitude = UNSET,
distributionType = UNIFORM,
fieldName = '',
localCsys = localCSYS,
#buckleCase=BUCKLING_MODES
) #NOT_APPLICABLE
#LOADS
myMod.StaticStep( name='PerturbationStep',
previous='Initial',
nlgeom=True )
#perturbation load
verticePL = instCyl.vertices.findAt( ((R, 0, 0),) )
myMod.ConcentratedForce( name='PerturbationLoad',
createStepName = 'PerturbationStep',
region= Region( vertices=verticePL ),
cf1 = -PLVALUE,
cf2 = 0.,
cf3 = 0. )
#axial load
topEdgeArray = instCyl.edges.findAt( ( (-R, H/2,0 ), ) )
myMod.ShellEdgeLoad(name='Load-3',
createStepName='PerturbationStep',
region=Region( side1Edges=topEdgeArray ),
magnitude=AXIALLOAD,
directionVector=((0.0, 0.0, 0.0), (0.0, -1.0, 0.0)),
distributionType=UNIFORM,
field='',
localCsys=None,
traction=GENERAL,
follower=OFF)
#MESHING THE PART
partCyl.generateMesh()
#CREATING JOB
job = mdb.Job( name =JOBNAME,
model = myMod,
scratch = r'c:\Temp\abaqus\scratch',
memory = 4,
memoryUnits = GIGA_BYTES,
#numCpus = 6,
)
job.writeInput(consistencyChecking=OFF)
mdb.save()
#: The model database has been saved to "C:\Temp\abaqus\test2.cae".
if __name__ == '__main__':
R = 50.
H = 200.
T = 2.
E = 71e3
NU = 0.33
ELSIZE = 2.
PLVALUE = 100.
PLpoint = 0.5 #cylinder height ratio
AXIALLOAD = 1000.
for i in range(10):
PLpoint = 0.05 + 0.1*i
JOBNAME = 'myJob_' + str(i)
MODELNAME = 'Cylinder Model ' + str(i)
isoCylinder( R, H, T, E, NU,
ELSIZE, PLVALUE, PLpoint, AXIALLOAD, JOBNAME, MODELNAME)
| saullocastro/mapy | mapy/writer/abaqus.py | Python | bsd-2-clause | 6,543 |
from abc import ABCMeta, abstractmethod
from ..operator_tools import hilbert_subspace_index
from ..utils import copy_with_new_cache, inspect_repr
class DynamicalModel(object):
"""
Abstract base class defining the DynamicalModel API used by spectroscopy
simulation methods
A DynamicalModel instance completely specifies how a system evolves freely
and in response to applied fields.
To implement a new type of dynamical model, inherit from this class and
override all abstract methods
Parameters
----------
hamiltonian : hamiltonian.Hamiltonian
Hamiltonian object specifying the system
rw_freq : float, optional
Rotating wave frequency at which to calculate dynamics. By default,
the rotating wave frequency is chosen from the central frequency
of the Hamiltonian.
hilbert_subspace : container, default 'ge'
Container of any or all of 'g', 'e' and 'f' indicating the maximum
        set of Hilbert subspaces on which to calculate the dynamics.
unit_convert : number, optional
Unit conversion from energy to time units (default 1).
Warning
-------
In the current implementation of DynamicalModel, it is assumed that you can
create a modified copy of a dynamical model by merely copying all instance
    variables and replacing the hamiltonian with a modified hamiltonian. If this
is not the case for your subclass, you need to override the
`sample_ensemble` method.
"""
__metaclass__ = ABCMeta
def __init__(self, hamiltonian, rw_freq=None, hilbert_subspace='gef',
unit_convert=1):
self.hamiltonian = hamiltonian.in_rotating_frame(rw_freq)
self.rw_freq = self.hamiltonian.rw_freq
self.hilbert_subspace = hilbert_subspace
self.unit_convert = unit_convert
def __repr__(self):
return inspect_repr(self)
def density_matrix_to_state_vector(self, rho):
"""
turn a density matrix into a state vector to use as the
diff eq initial condition
"""
return rho
def state_vector_density_matrix(self, rho):
"""
turn the diff eq trajectory (list of state vectors) into a
list of density matrices
"""
return rho
@abstractmethod
def thermal_state(self, liouville_subspace):
"""
Thermal state for this dynamical model
"""
@abstractmethod
def equation_of_motion(self, liouville_subspace, heisenberg_picture=False):
"""
Return the equation of motion for this dynamical model in the given
subspace, a function which takes the time and a state vector and returns
the first time derivative of the state vector, for use in a numerical
integration routine
If `heisenberg_picture` is True, returns an equation of motion for
operators in the Heisenberg picture. If a dynamical model does not
        implement an equation of motion in the Heisenberg picture, it will raise a
`NotImplementedError`.
"""
@abstractmethod
def map_between_subspaces(self, state, from_subspace, to_subspace):
"""
Given a state vector `state` defined on `from_subspace` in Liouville
space, return the state mapped onto `to_subspace`.
If any portion of `to_subspace` is not in `from_subspace`, that portion
of the state is assumed to be zero.
"""
def dipole_operator(self, liouv_subspace_map, polarization,
transitions='-+'):
"""
Return a dipole operator that follows the SystemOperator API for the
given liouville_subspace_map, polarization and requested transitions
"""
operator = self.hamiltonian.dipole_operator(self.hilbert_subspace,
polarization, transitions)
return self.system_operator(operator, liouv_subspace_map, self)
def dipole_destroy(self, liouville_subspace_map, polarization):
"""
        Return a dipole annihilation operator that follows the SystemOperator API
for the given subspace and polarization
"""
return self.dipole_operator(liouville_subspace_map, polarization, '-')
def dipole_create(self, liouville_subspace_map, polarization):
"""
Return a dipole creation operator that follows the SystemOperator
API for the given liouville_subspace_map and polarization
"""
return self.dipole_operator(liouville_subspace_map, polarization, '+')
def sample_ensemble(self, *args, **kwargs):
"""
Yields `ensemble_size` re-samplings of this dynamical model by sampling
the hamiltonian
"""
for ham in self.hamiltonian.sample_ensemble(*args, **kwargs):
new_dynamical_model = copy_with_new_cache(self)
new_dynamical_model.hamiltonian = ham
yield new_dynamical_model
@property
def time_step(self):
"""
The default time step at which to sample the equation of motion (in the
rotating frame)
"""
return self.hamiltonian.time_step / self.unit_convert
def hilbert_subspace_index(self, subspace):
return self.hamiltonian.hilbert_subspace_index(
subspace, self.hilbert_subspace)
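# Hedged sketch (illustration only, not part of this module): the minimal
# surface a concrete DynamicalModel must provide. `MySystemOperator` is a
# hypothetical companion class; note that `dipole_operator` above expects a
# `system_operator` attribute on the subclass.
#
# class MyDynamicalModel(DynamicalModel):
#     system_operator = MySystemOperator
#
#     def thermal_state(self, liouville_subspace):
#         ...  # return the equilibrium state vector for this subspace
#
#     def equation_of_motion(self, liouville_subspace, heisenberg_picture=False):
#         ...  # return f(t, state) -> d(state)/dt
#
#     def map_between_subspaces(self, state, from_subspace, to_subspace):
#         ...  # embed/truncate `state` between Liouville subspaces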
class SystemOperator(object):
"""
Abstract base class defining the SystemOperator API used by
spectroscopy simulation methods
    Instances of a SystemOperator class are abstract objects whose
commutator and expectation value can be calculated when applied to arbitrary
state vectors in a matching subspace used by DynamicalModel objects.
To implement a new type of system-field operator, inherit from this class
and override all abstract methods.
"""
__metaclass__ = ABCMeta
def commutator(self, state):
"""
Returns the commutator of the system-field operator with the given state
"""
return self.left_multiply(state) - self.right_multiply(state)
@abstractmethod
def left_multiply(self, state):
"""
Returns the left multiplication of the system-field operator with the
given state
"""
@abstractmethod
def right_multiply(self, state):
"""
Returns the right multiplication of the system-field operator with the
given state
"""
@abstractmethod
def expectation_value(self, state):
"""
Returns the expectation value of the system-field operator in the given
state
"""
| shoyer/qspectra | qspectra/dynamics/base.py | Python | bsd-2-clause | 6,614 |
from django.contrib import admin
from custom.m4change.models import McctStatus
class McctStatusAdmin(admin.ModelAdmin):
model = McctStatus
list_display = ('form_id', 'status', 'domain', 'reason', 'received_on', 'registration_date', 'immunized', 'is_booking', 'modified_on', 'user')
search_fields = ('form_id',)
admin.site.register(McctStatus, McctStatusAdmin) | SEL-Columbia/commcare-hq | custom/m4change/admin.py | Python | bsd-3-clause | 374 |
import fileinput
import os
import sys
log_phrase = 'total counts from'
for line in fileinput.input("/tmp/d3s_manager.log"):
if log_phrase in line:
sys.exit()
else:
os.system('sudo reboot')
| tybtab/dosenet-raspberrypi | d3s_monitor.py | Python | mit | 220 |
#-------------------------------------------------------------------------------
# elftools tests
#
# Karl Vogel ([email protected])
# Eli Bendersky ([email protected])
#
# This code is in the public domain
#-------------------------------------------------------------------------------
import unittest
import os
from elftools.elf.elffile import ELFFile
class TestMIPSSupport(unittest.TestCase):
def test_basic(self):
with open(os.path.join('test', 'testfiles_for_unittests',
'simple_gcc.elf.mips'), 'rb') as f:
elf = ELFFile(f)
self.assertEqual(elf.get_machine_arch(), 'MIPS')
# Check some other properties of this ELF file derived from readelf
self.assertEqual(elf['e_entry'], 0x0)
self.assertEqual(elf.num_sections(), 25)
self.assertEqual(elf.num_segments(), 0)
# Test that Mips-specific section types work; these types are
# available only when the file is identified as MIPS in the
# e_machine header field.
sec9 = elf.get_section(9)
self.assertEqual(sec9['sh_type'], 'SHT_MIPS_DWARF')
if __name__ == '__main__':
unittest.main()
| pombredanne/pyelftools | test/test_mips_support.py | Python | unlicense | 1,223 |
#!/usr/bin/env python
"""This modules contains tests for AFF4 API renderers."""
from grr.gui import api_test_lib
from grr.gui.api_plugins import aff4 as aff4_plugin
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
class ApiAff4RendererTest(test_lib.GRRBaseTest):
"""Test for ApiAff4Renderer."""
def setUp(self):
super(ApiAff4RendererTest, self).setUp()
self.renderer = aff4_plugin.ApiAff4Renderer()
def testRendersAff4ObjectWithGivenPath(self):
with test_lib.FakeTime(42):
with aff4.FACTORY.Create("aff4:/tmp/foo/bar", "AFF4Volume",
token=self.token) as _:
pass
result = self.renderer.Render(
aff4_plugin.ApiAff4RendererArgs(aff4_path="tmp/foo/bar"),
token=self.token)
self.assertEqual(result["urn"], "aff4:/tmp/foo/bar")
self.assertEqual(result["aff4_class"], "AFF4Volume")
self.assertEqual(result["age_policy"], "NEWEST_TIME")
self.assertEqual(result["attributes"]["metadata:last"], {
"value": 42000000,
"type": "RDFDatetime",
"age": 42000000})
class ApiAff4RendererRegressionTest(
api_test_lib.ApiCallRendererRegressionTest):
renderer = "ApiAff4Renderer"
def Run(self):
with test_lib.FakeTime(42):
with aff4.FACTORY.Create("aff4:/foo/bar", "AFF4Object",
mode="rw", token=self.token) as sample_object:
# Add labels to have some attributes filled in.
sample_object.AddLabels("label1", "label2")
self.Check("GET", "/api/aff4/foo/bar")
class ApiAff4IndexRendererTest(test_lib.GRRBaseTest):
"""Test for ApiAff4IndexRendererTest."""
def setUp(self):
super(ApiAff4IndexRendererTest, self).setUp()
self.renderer = aff4_plugin.ApiAff4IndexRenderer()
def testReturnsChildrenListWithTimestamps(self):
with test_lib.FakeTime(42):
with aff4.FACTORY.Create("aff4:/tmp/foo/bar1", "AFF4Volume",
token=self.token) as _:
pass
with test_lib.FakeTime(43):
with aff4.FACTORY.Create("aff4:/tmp/foo/bar2", "AFF4Volume",
token=self.token) as _:
pass
result = self.renderer.Render(
aff4_plugin.ApiAff4IndexRendererArgs(aff4_path="tmp/foo"),
token=self.token)
result = sorted(result, key=lambda x: x[0])
self.assertEqual(result,
[["aff4:/tmp/foo/bar1", 42000000],
["aff4:/tmp/foo/bar2", 43000000]])
class ApiAff4IndexRendererRegressionTest(
api_test_lib.ApiCallRendererRegressionTest):
renderer = "ApiAff4IndexRenderer"
def Run(self):
with test_lib.FakeTime(42):
with aff4.FACTORY.Create("some/path", "AFF4Volume", token=self.token):
pass
with test_lib.FakeTime(43):
with aff4.FACTORY.Create("some/path/foo", "AFF4Volume", token=self.token):
pass
with test_lib.FakeTime(44):
with aff4.FACTORY.Create("some/path/bar", "AFF4Volume", token=self.token):
pass
self.Check("GET", "/api/aff4-index/some/path")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| pombredanne/grr | gui/api_plugins/aff4_test.py | Python | apache-2.0 | 3,171 |
# Implements I/O over asynchronous sockets
from time import time
from sys import exc_info
from traceback import format_exception
from asyncore import socket_map
from asyncore import loop
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class AsynsockDispatcher(AbstractTransportDispatcher):
def __init__(self):
self.__sockMap = {} # use own map for MT safety
self.timeout = 0.5
AbstractTransportDispatcher.__init__(self)
def getSocketMap(self): return self.__sockMap
def setSocketMap(self, sockMap=socket_map): self.__sockMap = sockMap
def registerTransport(self, tDomain, t):
AbstractTransportDispatcher.registerTransport(self, tDomain, t)
t.registerSocket(self.__sockMap)
def unregisterTransport(self, tDomain):
self.getTransport(tDomain).unregisterSocket(self.__sockMap)
AbstractTransportDispatcher.unregisterTransport(self, tDomain)
def transportsAreWorking(self):
for transport in self.__sockMap.values():
if transport.writable():
return 1
return 0
def runDispatcher(self, timeout=0.0):
while self.jobsArePending() or self.transportsAreWorking():
try:
loop(timeout and timeout or self.timeout,
use_poll=True, map=self.__sockMap, count=1)
except KeyboardInterrupt:
raise
except:
raise PySnmpError('poll error: %s' % ';'.join(format_exception(*exc_info())))
self.handleTimerTick(time())
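# Hedged usage sketch (illustration only, not part of this module). A typical
# client flow; the UDP transport names below come from sibling pysnmp modules
# and are assumptions here:
#
# from pysnmp.carrier.asynsock.dgram import udp
#
# dispatcher = AsynsockDispatcher()
# dispatcher.registerTransport(udp.domainName,
#                              udp.UdpSocketTransport().openClientMode())
# # ...register a receive callback / queue outgoing messages via the base API...
# dispatcher.runDispatcher()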
| BoundaryDev/boundary-plugin-mongodb-enterprise-dev | pysnmp/carrier/asynsock/dispatch.py | Python | apache-2.0 | 1,605 |
#
# Martin Gracik <[email protected]>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from pykickstart.errors import KickstartParseWarning
from tests.baseclass import CommandTest, CommandSequenceTest
from pykickstart.commands.user import FC6_UserData
from pykickstart.version import FC6
class FC6_TestCase(CommandTest):
command = "user"
def runTest(self):
data1 = FC6_UserData()
data2 = FC6_UserData()
self.assertEqual(data1, data2)
self.assertFalse(data1 != data2) # test __ne__ method
self.assertNotEqual(data1, None)
for attr in ['name']:
setattr(data1, attr, '')
setattr(data2, attr, 'test')
self.assertNotEqual(data1, data2)
self.assertNotEqual(data2, data1)
setattr(data1, attr, '')
setattr(data2, attr, '')
# pass
self.assert_parse("user --name=user", "user --name=user\n")
obj = self.assert_parse("user --name=user --password=\"comment#inpassword\"", "user --name=user --password=\"comment#inpassword\"\n")
self.assertEqual(obj.password, "comment#inpassword")
self.assert_parse("user --name=user --groups=grp1,grp2 --homedir=/home/user --shell=/bin/bash --uid=1000 --password=secret --iscrypted",
"user --groups=grp1,grp2 --homedir=/home/user --name=user --password=secret --iscrypted --shell=/bin/bash --uid=1000\n")
self.assert_parse("user --name=user --groups=grp1", "user --groups=grp1 --name=user\n")
self.assert_parse("user --name=user --homedir=/home/user --shell=/bin/bash", "user --homedir=/home/user --name=user --shell=/bin/bash\n")
self.assert_parse("user --name=user --password=secret", "user --name=user --password=secret\n")
self.assert_parse("user --name=user --uid=1000", "user --name=user --uid=1000\n")
self.assertFalse(self.assert_parse("user --name=user") is None)
self.assertTrue(self.assert_parse("user --name=userA") !=
self.assert_parse("user --name=userB"))
self.assertFalse(self.assert_parse("user --name=userA") ==
self.assert_parse("user --name=userB"))
# fail
# missing required option --name
self.assert_parse_error("user")
# --name requires an argument
self.assert_parse_error("user --name")
# --uid requires int argument
self.assert_parse_error("user --name=user --uid=id")
# unknown option
self.assert_parse_error("user --name=user --unknown=value")
# required option arguments
self.assert_parse_error("user --name=user --groups")
self.assert_parse_error("user --name=user --homedir")
self.assert_parse_error("user --name=user --shell")
self.assert_parse_error("user --name=user --uid")
self.assert_parse_error("user --name=user --password")
# extra test coverage
ud = self.handler().UserData()
ud.uid = ""
ud.name = ""
self.assertEqual(ud.__str__(), "")
self.assertEqual(ud._getArgsAsStr(), "")
cmd = self.handler().commands[self.command]
cmd.userList = [ud]
self.assertEqual(cmd.__str__(), "")
class FC6_Duplicate_TestCase(CommandSequenceTest):
def __init__(self, *args, **kwargs):
CommandSequenceTest.__init__(self, *args, **kwargs)
self.version = FC6
def runTest(self):
# pass - can use the command twice, as long as they have different names
self.assert_parse("""
user --name=userA
user --name=userB""")
# fail - can't have two users with the same name
self.assert_parse_error("""
user --name=userA
user --name=userA""", KickstartParseWarning)
class F8_TestCase(FC6_TestCase):
def runTest(self):
# run FC6 test case
FC6_TestCase.runTest(self)
# pass
self.assert_parse("user --name=user --lock --plaintext", "user --name=user --lock\n")
self.assert_parse("user --name=user --lock", "user --name=user --lock\n")
self.assert_parse("user --name=user --plaintext", "user --name=user\n")
# fail
class F12_TestCase(F8_TestCase):
def runTest(self):
# run F8 test case
F8_TestCase.runTest(self)
# pass
self.assert_parse("user --name=user --gecos=\"User Name\"", "user --name=user --gecos=\"User Name\"\n")
class F19_TestCase(F12_TestCase):
def runTest(self):
# run F12 test case
F12_TestCase.runTest(self)
# pass
self.assert_parse("user --name=user --gid=500", "user --name=user --gid=500\n")
if __name__ == "__main__":
unittest.main()
| bcl/pykickstart | tests/commands/user.py | Python | gpl-2.0 | 5,559 |
# -*- coding: utf-8 -*-
import unittest
from unittest import TestCase
import boto.s3.connection
from boto.s3.key import Key
import urllib, urllib2
import StringIO
s3_cred = { 'host': 'precise64',
'port': 8000,
#'port': 80,
'access_key':'4WLAD43EZZ64EPK1CIRO',
'secret_key':'uGA3yy/NJqITgERIVmr9AgUZRBqUjPADvfQoxpKL',
#'bucket': 'rewrite',
'bucket': 'test1',
}
U_M_LIMIT = 5 * 1024 * 1024
class Tester():
def __init__(self, host, port, akey, skey, bucket, fkey, content, content_type, multipart_file_size):
self.fkey = fkey
self.host = host
self.bucket = bucket
self.content = content
self.content_type = content_type
self.multipart_file_size = multipart_file_size
self.conn = boto.s3.connection.S3Connection(host=host, port=port, is_secure=False, aws_access_key_id=akey,
aws_secret_access_key=skey, calling_format=boto.s3.connection.OrdinaryCallingFormat())
def create_bucket(self):
self.conn.create_bucket(self.bucket)
def delete(self):
bucket_obj = self.conn.get_bucket(self.bucket)
k = Key(bucket_obj)
k.key = self.fkey
bucket_obj.delete_key(k)
def upload(self):
bucket = self.conn.get_bucket(self.bucket)
k = Key(bucket)
k.key = self.fkey
k.set_contents_from_string(self.content, headers={'Content-Type': str(self.content_type)})
def upload_with_headers(self):
bucket = self.conn.get_bucket(self.bucket)
k = Key(bucket)
k.key = self.fkey
headers = {'Content-Type': str(self.content_type),
'x-amz-meta-origin': 'valtest',
'x-amz-meta-origin-a': 'valtest-a'}
k.set_contents_from_string(self.content, headers=headers )
headers = {'Content-Type': str(self.content_type),
'x-amz-meta-origin-a': 'valtest-a'}
k.set_contents_from_string(self.content, headers=headers )
def set_acl(self, policy):
bucket = self.conn.get_bucket(self.bucket)
k = Key(bucket)
k.key = self.fkey
k.set_acl(policy)
def test_upload(self):
self.delete()
self.upload()
self.set_acl('public-read')
bucket = self.conn.get_bucket(self.bucket)
k2 = Key(bucket)
k2.key = self.fkey
if k2.get_contents_as_string()!=self.content:
return False
return True
def test_upload_with_headers(self):
self.delete()
self.upload_with_headers()
self.set_acl('public-read')
bucket = self.conn.get_bucket(self.bucket)
k2 = Key(bucket)
k2.key = self.fkey
if k2.get_contents_as_string()!=self.content:
return False
return True
def test_upload_private_acl(self):
self.delete()
self.upload()
self.set_acl('private')
try:
urllib.urlretrieve('http://'+self.host+'/'+self.fkey)
except urllib2.HTTPError, code:
return False
return True
def test_get_metadata(self):
self.delete()
self.upload()
bucket_obj = self.conn.get_bucket(self.bucket)
k = bucket_obj.get_key(self.fkey)
if 'dict' in str(type(k.metadata)):
return True
return False
def test_delete(self):
self.upload()
self.delete()
return True
def test_public_read_acl(self):
self.delete()
self.upload()
self.set_acl('public-read')
bucket_obj = self.conn.get_bucket(self.bucket)
acl_info = bucket_obj.get_acl(key_name=self.fkey)
S3_PUBLIC_POLICY_URI = 'http://acs.amazonaws.com/groups/global/AllUsers'
for aicg in acl_info.acl.grants:
if aicg.uri == S3_PUBLIC_POLICY_URI:
if aicg.permission == "READ":
return True
return False
def multipart_upload(self):
fh = StringIO.StringIO('a' * self.multipart_file_size)
bucket = self.conn.get_bucket(self.bucket)
key = Key(bucket)
key.key = self.fkey
mp = bucket.initiate_multipart_upload(key)
try:
fh.seek(0, 0)
pos = 0
part_num = 0
while pos < self.multipart_file_size - 1:
if pos + U_M_LIMIT > self.multipart_file_size:
part_size = self.multipart_file_size - pos
else:
part_size = U_M_LIMIT
part_num += 1
mp.upload_part_from_file(fh, part_num, size=part_size)
pos += part_size
mp.complete_upload()
except:
mp.cancel_upload()
raise
return True
def test_multipart_upload(self):
self.multipart_upload()
self.delete()
return True
class BotoTest(TestCase):
def setUp(self):
self.boto_tester = Tester(s3_cred['host'], s3_cred['port'], s3_cred['access_key'],
s3_cred['secret_key'], s3_cred['bucket'], "filenamefile.txt", 'filecontentttttt', 'text/html', U_M_LIMIT + 100)
#s3_cred['secret_key'], s3_cred['bucket'], "filename_ żźćŻŹĆŁÓĄaą.txt", 'filecontentttttt', 'text/html', U_M_LIMIT + 100)
#def test_create_bucket(self):
# self.assertEquals(self.boto_tester.create_bucket(), True)
def test_upload(self):
self.assertEquals(self.boto_tester.test_upload(), True)
def test_upload_with_headers(self):
self.assertEquals(self.boto_tester.test_upload_with_headers(), True)
def test_delete(self):
self.assertEquals(self.boto_tester.test_delete(), True)
def test_public_read_acl(self):
self.assertEquals(self.boto_tester.test_public_read_acl(), True)
def test_upload_private_acl(self):
self.assertEquals(self.boto_tester.test_upload_private_acl(), True)
def test_upload_multipart(self):
self.assertEquals(self.boto_tester.test_multipart_upload(), True)
#---------------------------------------
if __name__ == "__main__":
unittest.main()
| DreamLab/ngx_aws_auth | tests/test.py | Python | bsd-2-clause | 6,274 |
from django.contrib.gis.db.backends.base.features import BaseSpatialFeatures
from django.db.backends.mysql.features import (
DatabaseFeatures as MySQLDatabaseFeatures,
)
from django.utils.functional import cached_property
class DatabaseFeatures(BaseSpatialFeatures, MySQLDatabaseFeatures):
has_spatialrefsys_table = False
supports_add_srs_entry = False
supports_distance_geodetic = False
supports_length_geodetic = False
supports_area_geodetic = False
supports_transform = False
supports_null_geometries = False
supports_num_points_poly = False
unsupported_geojson_options = {'crs'}
@cached_property
def empty_intersection_returns_none(self):
return (
not self.connection.mysql_is_mariadb and
self.connection.mysql_version < (5, 7, 5)
)
@cached_property
def supports_geometry_field_unique_index(self):
# Not supported in MySQL since https://dev.mysql.com/worklog/task/?id=11808
return self.connection.mysql_is_mariadb
@cached_property
def django_test_skips(self):
skips = super().django_test_skips
if (
not self.connection.mysql_is_mariadb and
self.connection.mysql_version < (8, 0, 0)
):
skips.update({
'MySQL < 8 gives different results.': {
'gis_tests.geoapp.tests.GeoLookupTest.test_disjoint_lookup',
},
})
return skips
| elena/django | django/contrib/gis/db/backends/mysql/features.py | Python | bsd-3-clause | 1,481 |
# -*- encoding: utf-8
from sqlalchemy.testing import eq_
from sqlalchemy import *
from sqlalchemy import types, schema, event
from sqlalchemy.databases import mssql
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \
ComparesTables
from sqlalchemy import testing
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy import util
class ReflectionTest(fixtures.TestBase, ComparesTables):
__only_on__ = 'mssql'
@testing.provide_metadata
def test_basic_reflection(self):
meta = self.metadata
users = Table(
'engine_users',
meta,
Column('user_id', types.INT, primary_key=True),
Column('user_name', types.VARCHAR(20), nullable=False),
Column('test1', types.CHAR(5), nullable=False),
Column('test2', types.Float(5), nullable=False),
Column('test3', types.Text('max')),
Column('test4', types.Numeric, nullable=False),
Column('test5', types.DateTime),
Column('parent_user_id', types.Integer,
ForeignKey('engine_users.user_id')),
Column('test6', types.DateTime, nullable=False),
Column('test7', types.Text('max')),
Column('test8', types.LargeBinary('max')),
Column('test_passivedefault2', types.Integer,
server_default='5'),
Column('test9', types.BINARY(100)),
Column('test_numeric', types.Numeric()),
)
addresses = Table(
'engine_email_addresses',
meta,
Column('address_id', types.Integer, primary_key=True),
Column('remote_user_id', types.Integer,
ForeignKey(users.c.user_id)),
Column('email_address', types.String(20)),
)
meta.create_all()
meta2 = MetaData()
reflected_users = Table('engine_users', meta2,
autoload=True,
autoload_with=testing.db)
reflected_addresses = Table('engine_email_addresses',
meta2, autoload=True, autoload_with=testing.db)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
@testing.provide_metadata
def test_identity(self):
metadata = self.metadata
table = Table(
'identity_test', metadata,
Column('col1', Integer, Sequence('fred', 2, 3), primary_key=True)
)
table.create()
meta2 = MetaData(testing.db)
table2 = Table('identity_test', meta2, autoload=True)
sequence = isinstance(table2.c['col1'].default, schema.Sequence) \
and table2.c['col1'].default
assert sequence.start == 2
assert sequence.increment == 3
@testing.emits_warning("Did not recognize")
@testing.provide_metadata
def test_skip_types(self):
metadata = self.metadata
testing.db.execute("""
create table foo (id integer primary key, data xml)
""")
t1 = Table('foo', metadata, autoload=True)
assert isinstance(t1.c.id.type, Integer)
assert isinstance(t1.c.data.type, types.NullType)
@testing.provide_metadata
def test_db_qualified_items(self):
metadata = self.metadata
Table('foo', metadata, Column('id', Integer, primary_key=True))
Table('bar', metadata,
Column('id', Integer, primary_key=True),
Column('foo_id', Integer, ForeignKey('foo.id', name="fkfoo"))
)
metadata.create_all()
dbname = testing.db.scalar("select db_name()")
owner = testing.db.scalar("SELECT user_name()")
inspector = inspect(testing.db)
bar_via_db = inspector.get_foreign_keys(
"bar", schema="%s.%s" % (dbname, owner))
eq_(
bar_via_db,
[{
'referred_table': 'foo',
'referred_columns': ['id'],
'referred_schema': 'test.dbo',
'name': 'fkfoo',
'constrained_columns': ['foo_id']}]
)
assert testing.db.has_table("bar", schema="test.dbo")
m2 = MetaData()
Table('bar', m2, schema="test.dbo", autoload=True,
autoload_with=testing.db)
eq_(m2.tables["test.dbo.foo"].schema, "test.dbo")
@testing.provide_metadata
def test_indexes_cols(self):
metadata = self.metadata
t1 = Table('t', metadata, Column('x', Integer), Column('y', Integer))
Index('foo', t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table('t', m2, autoload=True, autoload_with=testing.db)
eq_(
set(list(t2.indexes)[0].columns),
set([t2.c['x'], t2.c.y])
)
@testing.provide_metadata
def test_indexes_cols_with_commas(self):
metadata = self.metadata
t1 = Table('t', metadata,
Column('x, col', Integer, key='x'),
Column('y', Integer)
)
Index('foo', t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table('t', m2, autoload=True, autoload_with=testing.db)
eq_(
set(list(t2.indexes)[0].columns),
set([t2.c['x, col'], t2.c.y])
)
@testing.provide_metadata
def test_indexes_cols_with_spaces(self):
metadata = self.metadata
t1 = Table('t', metadata, Column('x col', Integer, key='x'),
Column('y', Integer))
Index('foo', t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table('t', m2, autoload=True, autoload_with=testing.db)
eq_(
set(list(t2.indexes)[0].columns),
set([t2.c['x col'], t2.c.y])
)
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode, tables
from sqlalchemy.dialects.mssql import base
class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_info_unicode_coercion(self):
dialect = mssql.dialect()
value = CoerceUnicode().bind_processor(dialect)('a string')
assert isinstance(value, util.text_type)
def test_info_unicode_cast_no_2000(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2000_VERSION
stmt = tables.c.table_name == 'somename'
self.assert_compile(
stmt,
"[TABLES_1].[TABLE_NAME] = :table_name_1",
dialect=dialect
)
def test_info_unicode_cast(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2005_VERSION
stmt = tables.c.table_name == 'somename'
self.assert_compile(
stmt,
"[TABLES_1].[TABLE_NAME] = CAST(:table_name_1 AS NVARCHAR(max))",
dialect=dialect
)
class ReflectHugeViewTest(fixtures.TestBase):
__only_on__ = 'mssql'
# crashes on freetds 0.91, not worth it
__skip_if__ = (
lambda: testing.requires.mssql_freetds.enabled,
)
def setup(self):
self.col_num = 150
self.metadata = MetaData(testing.db)
t = Table('base_table', self.metadata,
*[
Column("long_named_column_number_%d" % i, Integer)
for i in range(self.col_num)
]
)
self.view_str = view_str = \
"CREATE VIEW huge_named_view AS SELECT %s FROM base_table" % (
",".join("long_named_column_number_%d" % i
for i in range(self.col_num))
)
assert len(view_str) > 4000
event.listen(t, 'after_create', DDL(view_str) )
event.listen(t, 'before_drop', DDL("DROP VIEW huge_named_view") )
self.metadata.create_all()
def teardown(self):
self.metadata.drop_all()
def test_inspect_view_definition(self):
inspector = Inspector.from_engine(testing.db)
view_def = inspector.get_view_definition("huge_named_view")
eq_(view_def, self.view_str)
| wfxiang08/sqlalchemy | test/dialect/mssql/test_reflection.py | Python | mit | 8,234 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'recipe_engine/buildbucket',
'recipe_engine/context',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
'git',
]
def RunSteps(api):
url = 'https://chromium.googlesource.com/chromium/src.git'
# git.checkout can optionally dump GIT_CURL_VERBOSE traces to a log file,
# useful for debugging git access issues that are reproducible only on bots.
curl_trace_file = None
if api.properties.get('use_curl_trace'):
curl_trace_file = api.path['start_dir'].join('curl_trace.log')
submodule_update_force = api.properties.get('submodule_update_force', False)
submodule_update_recursive = api.properties.get('submodule_update_recursive',
True)
# You can use api.git.checkout to perform all the steps of a safe checkout.
revision = (api.buildbucket.gitiles_commit.ref or
api.buildbucket.gitiles_commit.id)
retVal = api.git.checkout(
url,
ref=revision,
recursive=True,
submodule_update_force=submodule_update_force,
set_got_revision=api.properties.get('set_got_revision'),
curl_trace_file=curl_trace_file,
remote_name=api.properties.get('remote_name'),
display_fetch_size=api.properties.get('display_fetch_size'),
file_name=api.properties.get('checkout_file_name'),
submodule_update_recursive=submodule_update_recursive,
use_git_cache=api.properties.get('use_git_cache'),
tags=api.properties.get('tags'))
assert retVal == "deadbeef", (
"expected retVal to be %r but was %r" % ("deadbeef", retVal))
# count_objects shows number and size of objects in .git dir.
api.git.count_objects(
name='count-objects',
can_fail_build=api.properties.get('count_objects_can_fail_build'),
git_config_options={'foo': 'bar'})
# Get the remote URL.
api.git.get_remote_url(
step_test_data=lambda: api.raw_io.test_api.stream_output('foo'))
api.git.get_timestamp(test_data='foo')
# You can use api.git.fetch_tags to fetch all tags from the remote
api.git.fetch_tags(api.properties.get('remote_name'))
# If you need to run more arbitrary git commands, you can use api.git itself,
# which behaves like api.step(), but automatically sets the name of the step.
with api.context(cwd=api.path['checkout']):
api.git('status')
api.git('status', name='git status can_fail_build',
can_fail_build=True)
api.git('status', name='git status cannot_fail_build',
can_fail_build=False)
# You should run git new-branch before you upload something with git cl.
api.git.new_branch('refactor') # Upstream is origin/master by default.
# And use upstream kwarg to set up different upstream for tracking.
api.git.new_branch('feature', upstream='refactor')
# You can use api.git.rebase to rebase the current branch onto another one
api.git.rebase(name_prefix='my repo', branch='origin/master',
dir_path=api.path['checkout'],
remote_name=api.properties.get('remote_name'))
if api.properties.get('cat_file', None):
step_result = api.git.cat_file_at_commit(api.properties['cat_file'],
revision,
stdout=api.raw_io.output())
if 'TestOutput' in step_result.stdout:
pass # Success!
# Bundle the repository.
api.git.bundle_create(
api.path['start_dir'].join('all.bundle'))
def GenTests(api):
yield api.test('basic')
yield api.test('basic_tags') + api.properties(tags=True)
yield api.test('basic_ref') + api.buildbucket.ci_build(git_ref='refs/foo/bar')
yield api.test('basic_branch') + api.buildbucket.ci_build(
git_ref='refs/heads/testing')
yield api.test('basic_hash') + api.buildbucket.ci_build(
revision='abcdef0123456789abcdef0123456789abcdef01', git_ref=None)
yield api.test('basic_file_name') + api.properties(checkout_file_name='DEPS')
yield api.test('basic_submodule_update_force') + api.properties(
submodule_update_force=True)
yield api.test('platform_win') + api.platform.name('win')
yield (
api.test('curl_trace_file') +
api.properties(use_curl_trace=True) +
api.buildbucket.ci_build(git_ref='refs/foo/bar')
)
yield (
api.test('can_fail_build') +
api.step_data('git status can_fail_build', retcode=1)
)
yield (
api.test('cannot_fail_build') +
api.step_data('git status cannot_fail_build', retcode=1)
)
yield (
api.test('set_got_revision') +
api.properties(set_got_revision=True)
)
yield (
api.test('rebase_failed') +
api.step_data('my repo rebase', retcode=1)
)
yield api.test('remote_not_origin') + api.properties(remote_name='not_origin')
yield (
api.test('count-objects_delta') +
api.properties(display_fetch_size=True))
yield (
api.test('count-objects_failed') +
api.step_data('count-objects', retcode=1))
yield (
api.test('count-objects_with_bad_output') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))))
yield (
api.test('count-objects_with_bad_output_fails_build') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))) +
api.properties(count_objects_can_fail_build=True))
yield (
api.test('cat-file_test') +
api.step_data('git cat-file abcdef12345:TestFile',
stdout=api.raw_io.output('TestOutput')) +
api.buildbucket.ci_build(revision='abcdef12345', git_ref=None) +
api.properties(cat_file='TestFile'))
yield (
api.test('git-cache-checkout') +
api.properties(use_git_cache=True))
| endlessm/chromium-browser | third_party/depot_tools/recipes/recipe_modules/git/examples/full.py | Python | bsd-3-clause | 5,942 |
import itertools
from gooey import source_parser, code_prep
import tempfile
__author__ = 'Chris'
"""
Pretty Printing util for inspecting the various ast objects
"""
import ast
from _ast import Assign, Call
def pretty_print(node, indent):
d = node.__dict__
for k, v in d.iteritems():
if isinstance(v, list):
print '-' * indent, k, ": "
for i in v:
pretty_print(i, indent + 2)
elif 'ast' in str(type(v)):
pretty_print(v, indent + 2)
else:
print '-' * indent, k, ": ", v
if __name__ == '__main__':
lines = '''
def main():
x = 1
y = 2
foo, doo = ("poo", "poo")
smarser = argparse.ArgumentParser(description='Example Argparse Program', formatter_class=RawDescriptionHelpFormatter)
random_junk = 123412353454356
smarser.add_argument("filename", help="Name of the file you want to read") # positional'
smarser.add_argument("outfile", help="Name of the file where you'll save the output") # positional
bar = x + y
baz = random_junk * 5
'''
lines2 = '''
def main():
try:
foo, doo = ("poo", "poo")
smarser = argparse.ArgumentParser(description='Example Argparse Program', formatter_class=RawDescriptionHelpFormatter)
smarser.add_argument("filename", help="Name of the file you want to read") # positional'
smarser.add_argument("outfile", help="Name of the file where you'll save the output") # positional
smarser.parse_args()
except:
pass
'''
git_example = '''
from argparse import ArgumentParser
def main():
"""Main"""
bar = 'bar'
print "Hello!"
description='Desc'
parser = ArgumentParser(description=bar)
parser.add_argument(bar, help=('bar')) ##################
return parser
# args = parser.parse_args()
# print(args)
# return True
'''
nodes = ast.parse(git_example)
assign = source_parser.get_nodes_by_instance_type(nodes, Assign)
assignment = source_parser.get_nodes_by_containing_attr(assign, "ArgumentParser")
varname, instruction = code_prep.split_line(source_parser.convert_to_python(assignment)[0])
updated_code = git_example.replace(varname, "jello_maker")
all_code_leading_up_to_parseargs = '\n'.join(itertools.takewhile(lambda line: 'parse_args()' not in line, updated_code.split('\n')))
code = compile(all_code_leading_up_to_parseargs, '', 'exec')
exec(code)
parser = main()
print parser._actions
# print assign[0].value.func.__dict__
# print assign[0].value.keywords[0].value.__dict__
# pretty_print(assign[0], 1)
| garrettcap/Bulletproof-Backup | gooey/dev_utils/ast_inspector.py | Python | gpl-2.0 | 2,590 |
import numpy as np
from scipy import sparse
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import math as mt
# this is the transfer function
def phi(x,theta,uc):
myphi=nu*(x-theta)
myphi[x>uc]=nu*(uc-theta)
myphi[theta>x]=0.
return myphi
def phi_brunel(x,theta,uc):
myphi=nu_brunel*((x-theta)/uc)**2
myphi[x>(uc+theta)]=2*nu_brunel*np.sqrt((x[x>(uc+theta)]-theta)/uc-3./4.)
myphi[theta>x]=0.
return myphi
def phi_tanh(x):
return 0.5*(1+np.tanh(a1*(x+b1)))
# this is the connectivity matrix of the network
def net(wmax,sdel,n,k):
mysdel=np.concatenate([sdel[i]*np.ones(n/k) for i in range(k)])
mysdel=mysdel[0:-1]
mywmax=np.concatenate([wmax[i]*np.ones(n/k) for i in range(k)])
diagonals=[mywmax,mysdel]
return sparse.diags(diagonals,[0,-1])
def net_matrix(wmax,sdel,n,k):
mysdel=np.concatenate([sdel[i]*np.ones(n/k) for i in range(k)])
mysdel=mysdel[0:-1]
mywmax=np.concatenate([wmax[i]*np.ones(n/k) for i in range(k)])
diagonals=[mywmax,mysdel]
vuelta=np.zeros((n,n))
vuelta[0,-1]=0.1
return np.diag(diagonals[0],0)+np.diag(diagonals[1],-1)-w_inh*nu*np.ones((n,n))#+vuelta
#fields approximations
def field_tanh(x,t):
return net(wmax,sdel,n,k).dot(phi_tanh(x))-x-w_inh*np.dot(r1_matrix,phi_tanh(x))
def field_pw(x,t):
return net(wmax,sdel,n,k).dot(phi(x,theta,uc))-x-w_inh*np.dot(r1_matrix,phi(x,theta,uc))
def field_brunel(x,t):
return net(wmax,sdel,n,k).dot(phi_brunel(x,theta,uc))-x-w_inh*np.dot(r1_matrix,phi_brunel(x,theta,uc))
#field true
def field_true_tanh(x,t):
n=len(x)
thefield=np.zeros(n)
thefield[0:n-1]=net(wmax,sdel,n-1,k).dot(phi_tanh(x[0:n-1]))-x[0:n-1]-w_inh*x[-1]*np.ones(n-1)
thefield[-1]=2.*(-x[-1]+np.ones(n-1).dot(phi_tanh(x[0:n-1])))
return thefield
def field_true_pw(x,t):
n=len(x)
thefield=np.zeros(n)
thefield[0:n-1]=net(wmax,sdel,n-1,k).dot(phi(x[0:n-1],theta,uc))-x[0:n-1]-w_inh*x[-1]*np.ones(n-1)
thefield[-1]=2.*(-x[-1]+np.ones(n-1).dot(phi(x[0:n-1],theta,uc)))
return thefield
def field_true_brunel(x,t):
n=len(x)
thefield=np.zeros(n)
thefield[0:n-1]=net(wmax,sdel,n-1,k).dot(phi_brunel(x[0:n-1],theta_brunel,uc_brunel))-x[0:n-1]-w_inh*x[-1]*np.ones(n-1)
thefield[-1]=2.*(-x[-1]+np.ones(n-1).dot(phi_brunel(x[0:n-1],theta_brunel,uc_brunel)))
return thefield
def rk4(f,y0,dt,T):
mysol=[]
mytime=[]
t=0
un=y0
mytime.append(t)
mysol.append(un)
while t<=T:
k1=f(un,t)
k2=f(un+(dt/2)*k1,t+dt/2)
k3=f(un+(dt/2)*k2,t+dt/2)
k4=f(un+dt*k3,t+dt)
un=un+(dt/6)*(k1+2*k2+2*k3+k4)
t=t+dt
mysol.append(un)
mytime.append(t)
print(t)
return np.array(mysol),mytime
n=200
k=200
w_i=1.8
w_inh=w_i/n
nu=2.
nu_brunel=0.4*nu
theta=-0.0
theta_brunel=-0.1
uc=1/nu
uc_brunel=uc
sdelmax=0.6
sdelmin=0.45
wmaxmin=0.05
wmaxmax=0.05
#print(1./(nu+sdel))
a1=6.
b1=-0.25
sdel=np.random.uniform(sdelmin,sdelmax,k)#np.linspace(sdelmin,sdelmax,k)
wmax=np.linspace(wmaxmin,wmaxmax,k)
r1_matrix=np.ones((n,n))#np.outer(np.ones(n),np.random.normal(1,sigma_wi/n,n))
y0=theta*np.ones(n)
y0[0]=1.
y0_true=np.zeros(n+1)
y0_true[0]=1.
tsim=1000.
#approx
#ytanh,timetanh=rk4(field_pw,y0,0.1,tsim)
#ypw,timepw=rk4(field_tanh,y0,0.1,tsim)
#ybrunel,timebrunel=rk4(field_brunel,y0,0.1,tsim)
#true
ytanh_true,timetanh_true=rk4(field_true_pw,y0_true,0.1,tsim)
#ypw_true,timepw_true=rk4(field_true_tanh,y0_true,0.1,tsim)
#ybrunel_true,timebrunel_true=rk4(field_true_brunel,y0_true,0.1,tsim)
#figure
figure=plt.figure()
#connectivity matrix
W01=net_matrix(wmax,sdel,n,k)
matrix_AL=figure.add_subplot(221)
mymatrix=matrix_AL.matshow(W01)
cbaxes = figure.add_axes([0.05, 0.51, 0.03, 0.45])
figure.colorbar(mymatrix,cax=cbaxes)
matrix_AL.set_xlabel('connectivity matrix')
#transfer function
myu=np.linspace(-.5,1.5,200)
tf=figure.add_subplot(222)
l1=tf.plot(myu,phi_tanh(myu),'b')
#l2=tf.plot(myu,phi(myu,theta,uc),'g')
#l3=tf.plot(myu,phi_brunel(myu,theta_brunel,uc_brunel),'r')
tf.set_xlabel('Current')
tf.set_ylabel('Transfer function value ')
#tf.legend(('Sigmoidial','Piecewise','Brunel'),'upper left')
#tf.set_ylim([0,4.2])
#dynamics
dynamics=figure.add_subplot(223)
dynamics.plot(timetanh_true,ytanh_true[:,0:n],'b')
#dynamics.plot(timepw_true,ypw_true[:,0:n],'g')
#dynamics.plot(timebrunel_true,ybrunel_true[:,0:n],'r')
dynamics.set_xlim([0,tsim])
dynamics.set_xlabel('Time')
dynamics.set_ylabel('Exc.Pop. Current')
#dynamics
dynamics2=figure.add_subplot(224)
dynamics2.plot(timetanh_true,ytanh_true[:,0:n],'b')
#dynamics2.plot(timepw_true,ypw_true[:,0:n],'g')
#dynamics2.plot(timebrunel_true,ybrunel_true[:,0:n],'r')
dynamics2.set_xlim([0,15.])
dynamics2.set_xlabel('Time')
dynamics2.set_ylabel('Exc.Pop. Current')
print(sdel)
plt.show()
| ulisespereira/LearningSequences | fixedConnectivity/popModel/sequences_random.py | Python | gpl-2.0 | 4,644 |
from __future__ import absolute_import
from django.core import serializers
from ajax.exceptions import AlreadyRegistered, NotRegistered
from django.db.models.fields import FieldDoesNotExist
from django.db import models
from django.conf import settings
from django.utils.html import escape
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
import collections
import six
# Used to change the field name for the Model's pk.
AJAX_PK_ATTR_NAME = getattr(settings, 'AJAX_PK_ATTR_NAME', 'pk')
def _fields_from_model(model):
return [field.name for field in model.__class__._meta.fields]
class DefaultEncoder(object):
_mapping = {
'IntegerField': int,
'PositiveIntegerField': int,
'AutoField': int,
'FloatField': float,
}
def to_dict(self, record, expand=False, html_escape=False, fields=None):
self.html_escape = html_escape
if hasattr(record, '__exclude__') and callable(record.__exclude__):
try:
exclude = record.__exclude__()
if fields is None:
fields = _fields_from_model(record)
fields = set(fields) - set(exclude)
except TypeError:
pass
data = serializers.serialize('python', [record], fields=fields)[0]
if hasattr(record, 'extra_fields'):
ret = record.extra_fields
else:
ret = {}
ret.update(data['fields'])
ret[AJAX_PK_ATTR_NAME] = data['pk']
for field, val in six.iteritems(ret):
try:
f = record.__class__._meta.get_field(field)
if expand and isinstance(f, models.ForeignKey):
try:
row = f.rel.to.objects.get(pk=val)
new_value = self.to_dict(row, False)
except f.rel.to.DoesNotExist:
new_value = None # Changed this to None from {} -G
else:
new_value = self._encode_value(f, val)
ret[smart_str(field)] = new_value
except FieldDoesNotExist as e:
pass # Assume extra fields are already safe.
if expand and hasattr(record, 'tags') and \
record.tags.__class__.__name__.endswith('TaggableManager'):
# Looks like this model is using taggit.
ret['tags'] = [{'name': self._escape(t.name),
'slug': self._escape(t.slug)} for t in record.tags.all()]
return ret
__call__ = to_dict
def _encode_value(self, field, value):
if value is None:
return value # Leave all None's as-is as they encode fine.
try:
return self._mapping[field.__class__.__name__](value)
except KeyError:
if isinstance(field, models.ForeignKey):
f = field.rel.to._meta.get_field(field.rel.field_name)
return self._encode_value(f, value)
elif isinstance(field, models.BooleanField):
# If someone could explain to me why the fuck the Python
# serializer appears to serialize BooleanField to a string
# with "True" or "False" in it, please let me know.
return (value == "True" or (type(value) == bool and value))
return self._escape(value)
def _escape(self, value):
if self.html_escape:
return escape(value)
return value
class HTMLEscapeEncoder(DefaultEncoder):
"""Encodes all values using Django's HTML escape function."""
def _escape(self, value):
return escape(value)
class ExcludeEncoder(DefaultEncoder):
def __init__(self, exclude):
self.exclude = exclude
def __call__(self, record, html_escape=False):
fields = set(_fields_from_model(record)) - set(self.exclude)
return self.to_dict(record, html_escape=html_escape, fields=fields)
class IncludeEncoder(DefaultEncoder):
def __init__(self, include):
self.include = include
def __call__(self, record, html_escape=False):
return self.to_dict(record, html_escape=html_escape, fields=self.include)
class Encoders(object):
def __init__(self):
self._registry = {}
def register(self, model, encoder):
if model in self._registry:
raise AlreadyRegistered()
self._registry[model] = encoder
def unregister(self, model):
if model not in self._registry:
raise NotRegistered()
del self._registry[model]
def get_encoder_from_record(self, record):
if isinstance(record, models.Model) and \
record.__class__ in self._registry:
encoder = self._registry[record.__class__]
else:
encoder = DefaultEncoder()
return encoder
def encode(self, record, encoder=None, html_escape=False):
if isinstance(record, collections.Iterable):
ret = []
for i in record:
if not encoder:
encoder = self.get_encoder_from_record(i)
ret.append(self.encode(i, html_escape=html_escape))
else:
if not encoder:
encoder = self.get_encoder_from_record(record)
ret = encoder(record, html_escape=html_escape)
return ret
encoder = Encoders()
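# Hedged usage sketch (illustration only, not part of this module). A project
# would typically register per-model encoders at startup; `Widget` is a
# hypothetical model used purely for illustration:
#
# from myapp.models import Widget
#
# encoder.register(Widget, ExcludeEncoder(['internal_notes']))
# payload = encoder.encode(Widget.objects.all(), html_escape=True)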
| psu-oit/django-ajax | ajax/encoders.py | Python | bsd-3-clause | 5,422 |
import inspect
import re
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.utils.module_loading import import_by_path
from django.middleware.csrf import rotate_token
from .signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
return import_by_path(path)()
def get_backends():
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backends.append(load_backend(backend_path))
if not backends:
raise ImproperlyConfigured('No authentication backends have been defined. Does AUTHENTICATION_BACKENDS contain anything?')
return backends
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend in get_backends():
try:
inspect.getcallargs(backend.authenticate, **credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
try:
user = backend.authenticate(**credentials)
except PermissionDenied:
# This backend says to stop in our tracks - this user should not be allowed in at all.
return None
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__,
credentials=_clean_credentials(credentials))
def login(request, user):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request. Note that data set during
the anonymous session is retained when the user logs in.
"""
if user is None:
user = request.user
# TODO: It would be nice to support different login methods, like signed cookies.
if SESSION_KEY in request.session:
if request.session[SESSION_KEY] != user.pk:
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
request.session[SESSION_KEY] = user.pk
request.session[BACKEND_SESSION_KEY] = user.backend
if hasattr(request, 'user'):
request.user = user
rotate_token(request)
user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
"""
Removes the authenticated user's ID from the request and flushes their
session data.
"""
# Dispatch the signal before the user is logged out so the receivers have a
# chance to find out *who* logged out.
user = getattr(request, 'user', None)
if hasattr(user, 'is_authenticated') and not user.is_authenticated():
user = None
user_logged_out.send(sender=user.__class__, request=request, user=user)
# remember language choice saved to session
# for backwards compatibility django_language is also checked (remove in 1.8)
language = request.session.get('_language', request.session.get('django_language'))
request.session.flush()
if language is not None:
request.session['_language'] = language
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user_model():
"""
Returns the User model that is active in this project.
"""
from django.db.models import get_model
try:
app_label, model_name = settings.AUTH_USER_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
user_model = get_model(app_label, model_name)
if user_model is None:
raise ImproperlyConfigured("AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL)
return user_model
def get_user(request):
"""
Returns the user model instance associated with the given request session.
If no user is retrieved an instance of `AnonymousUser` is returned.
"""
from .models import AnonymousUser
try:
user_id = request.session[SESSION_KEY]
backend_path = request.session[BACKEND_SESSION_KEY]
assert backend_path in settings.AUTHENTICATION_BACKENDS
backend = load_backend(backend_path)
user = backend.get_user(user_id) or AnonymousUser()
except (KeyError, AssertionError):
user = AnonymousUser()
return user
def get_permission_codename(action, opts):
"""
Returns the codename of the permission for the specified action.
"""
return '%s_%s' % (action, opts.model_name)
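# Hedged usage sketch (illustration only, not part of this module). The
# canonical pattern built on authenticate()/login() above; the view and field
# names are illustrative:
#
# def login_view(request):
#     user = authenticate(username=request.POST['username'],
#                         password=request.POST['password'])
#     if user is not None and user.is_active:
#         login(request, user)
#     ...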
| AlexHill/django | django/contrib/auth/__init__.py | Python | bsd-3-clause | 5,569 |
"""
Serializer for video outline
"""
from edxval.api import ValInternalError, get_video_info_for_course_and_profiles
from rest_framework.reverse import reverse
from courseware.access import has_access
from courseware.courses import get_course_by_id
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from util.module_utils import get_dynamic_descriptor_children
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.mongo.base import BLOCK_TYPES_WITH_CHILDREN
class BlockOutline(object):
"""
Serializes course videos, pulling data from VAL and the video modules.
"""
def __init__(self, course_id, start_block, block_types, request, video_profiles):
"""Create a BlockOutline using `start_block` as a starting point."""
self.start_block = start_block
self.block_types = block_types
self.course_id = course_id
self.request = request # needed for making full URLS
self.local_cache = {}
try:
self.local_cache['course_videos'] = get_video_info_for_course_and_profiles(
unicode(course_id), video_profiles
)
except ValInternalError: # pragma: nocover
self.local_cache['course_videos'] = {}
def __iter__(self):
def parent_or_requested_block_type(usage_key):
"""
Returns whether the usage_key's block_type is one of self.block_types or a parent type.
"""
return (
usage_key.block_type in self.block_types or
usage_key.block_type in BLOCK_TYPES_WITH_CHILDREN
)
def create_module(descriptor):
"""
Factory method for creating and binding a module for the given descriptor.
"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course_id, self.request.user, descriptor, depth=0,
)
course = get_course_by_id(self.course_id)
return get_module_for_descriptor(
self.request.user, self.request, descriptor, field_data_cache, self.course_id, course=course
)
with modulestore().bulk_operations(self.course_id):
child_to_parent = {}
stack = [self.start_block]
while stack:
curr_block = stack.pop()
if curr_block.hide_from_toc:
# For now, if the 'hide_from_toc' setting is set on the block, do not traverse down
# the hierarchy. The reason being is that these blocks may not have human-readable names
# to display on the mobile clients.
# Eventually, we'll need to figure out how we want these blocks to be displayed on the
# mobile clients. As they are still accessible in the browser, just not navigatable
# from the table-of-contents.
continue
if curr_block.location.block_type in self.block_types:
if not has_access(self.request.user, 'load', curr_block, course_key=self.course_id):
continue
summary_fn = self.block_types[curr_block.category]
block_path = list(path(curr_block, child_to_parent, self.start_block))
unit_url, section_url = find_urls(self.course_id, curr_block, child_to_parent, self.request)
yield {
"path": block_path,
"named_path": [b["name"] for b in block_path],
"unit_url": unit_url,
"section_url": section_url,
"summary": summary_fn(self.course_id, curr_block, self.request, self.local_cache)
}
if curr_block.has_children:
children = get_dynamic_descriptor_children(
curr_block,
self.request.user.id,
create_module,
usage_key_filter=parent_or_requested_block_type
)
for block in reversed(children):
stack.append(block)
child_to_parent[block] = curr_block
def path(block, child_to_parent, start_block):
"""path for block"""
block_path = []
while block in child_to_parent:
block = child_to_parent[block]
if block is not start_block:
block_path.append({
# to be consistent with other edx-platform clients, return the defaulted display name
'name': block.display_name_with_default_escaped,
'category': block.category,
'id': unicode(block.location)
})
return reversed(block_path)
def find_urls(course_id, block, child_to_parent, request):
"""
Find the section and unit urls for a block.
Returns:
unit_url, section_url:
unit_url (str): The url of a unit
section_url (str): The url of a section
"""
block_path = []
while block in child_to_parent:
block = child_to_parent[block]
block_path.append(block)
block_list = list(reversed(block_path))
block_count = len(block_list)
chapter_id = block_list[1].location.block_id if block_count > 1 else None
section = block_list[2] if block_count > 2 else None
position = None
if block_count > 3:
position = 1
for block in section.children:
if block.block_id == block_list[3].url_name:
break
position += 1
kwargs = {'course_id': unicode(course_id)}
if chapter_id is None:
course_url = reverse("courseware", kwargs=kwargs, request=request)
return course_url, course_url
kwargs['chapter'] = chapter_id
if section is None:
chapter_url = reverse("courseware_chapter", kwargs=kwargs, request=request)
return chapter_url, chapter_url
kwargs['section'] = section.url_name
section_url = reverse("courseware_section", kwargs=kwargs, request=request)
if position is None:
return section_url, section_url
kwargs['position'] = position
unit_url = reverse("courseware_position", kwargs=kwargs, request=request)
return unit_url, section_url
def video_summary(video_profiles, course_id, video_descriptor, request, local_cache):
"""
returns summary dict for the given video module
"""
always_available_data = {
"name": video_descriptor.display_name,
"category": video_descriptor.category,
"id": unicode(video_descriptor.scope_ids.usage_id),
"only_on_web": video_descriptor.only_on_web,
}
all_sources = []
if video_descriptor.only_on_web:
ret = {
"video_url": None,
"video_thumbnail_url": None,
"duration": 0,
"size": 0,
"transcripts": {},
"language": None,
"all_sources": all_sources,
}
ret.update(always_available_data)
return ret
# Get encoded videos
video_data = local_cache['course_videos'].get(video_descriptor.edx_video_id, {})
# Get highest priority video to populate backwards compatible field
default_encoded_video = {}
if video_data:
for profile in video_profiles:
default_encoded_video = video_data['profiles'].get(profile, {})
if default_encoded_video:
break
if default_encoded_video:
video_url = default_encoded_video['url']
# Then fall back to VideoDescriptor fields for video URLs
elif video_descriptor.html5_sources:
video_url = video_descriptor.html5_sources[0]
all_sources = video_descriptor.html5_sources
else:
video_url = video_descriptor.source
if video_descriptor.source:
all_sources.append(video_descriptor.source)
# Get duration/size, else default
duration = video_data.get('duration', None)
size = default_encoded_video.get('file_size', 0)
# Transcripts...
transcripts_info = video_descriptor.get_transcripts_info()
transcript_langs = video_descriptor.available_translations(transcripts=transcripts_info)
transcripts = {
lang: reverse(
'video-transcripts-detail',
kwargs={
'course_id': unicode(course_id),
'block_id': video_descriptor.scope_ids.usage_id.block_id,
'lang': lang
},
request=request,
)
for lang in transcript_langs
}
ret = {
"video_url": video_url,
"video_thumbnail_url": None,
"duration": duration,
"size": size,
"transcripts": transcripts,
"language": video_descriptor.get_default_transcript_language(transcripts_info),
"encoded_videos": video_data.get('profiles'),
"all_sources": all_sources,
}
ret.update(always_available_data)
return ret
| teltek/edx-platform | lms/djangoapps/mobile_api/video_outlines/serializers.py | Python | agpl-3.0 | 9,122 |
from collections import OrderedDict
import time
from itertools import islice
import threading
import weakref
from contextlib import contextmanager
def lru_cache_function(max_size=1024, expiration=15*60, **kwargs):
"""
>>> @lru_cache_function(max_size=3, expiration=1)
... def f(x):
... print("Calling f(" + str(x) + ")")
... return x
>>> f(3)
Calling f(3)
3
>>> f(3)
3
"""
def wrapper(func):
return LRUCachedFunction(func, LRUCacheDict(
max_size=max_size, expiration=expiration, **kwargs))
return wrapper
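# Illustrative sketch (not part of the original module): extra keyword
# arguments passed to lru_cache_function are forwarded to LRUCacheDict, so a
# thread-safe cache with a background cleanup thread could be requested as:
#
#   @lru_cache_function(max_size=128, expiration=5*60,
#                       thread_clear=True, concurrent=True)
#   def expensive(x):
#       return x * x
#
# thread_clear and concurrent are LRUCacheDict.__init__ options defined below.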
def _lock_decorator(func):
"""
If the LRUCacheDict is concurrent, then we should lock in order to avoid
conflicts with threading, or the ThreadTrigger.
"""
def withlock(self, *args, **kwargs):
if self.concurrent:
with self._rlock:
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
    withlock.__name__ = func.__name__
return withlock
class LRUCacheDict(object):
""" A dictionary-like object, supporting LRU caching semantics.
>>> d = LRUCacheDict(max_size=3, expiration=3)
>>> d['foo'] = 'bar'
>>> d['foo']
'bar'
>>> import time
>>> time.sleep(4) # 4 seconds > 3 second cache expiry of d
>>> d['foo']
Traceback (most recent call last):
...
KeyError: 'foo'
>>> d['a'] = 'A'
>>> d['b'] = 'B'
>>> d['c'] = 'C'
>>> d['d'] = 'D'
    >>> d['a'] # Should raise a KeyError, since we exceeded the max cache size
Traceback (most recent call last):
...
KeyError: 'a'
By default, this cache will only expire items whenever you poke it - all methods on
this class will result in a cleanup. If the thread_clear option is specified, a background
thread will clean it up every thread_clear_min_check seconds.
If this class must be used in a multithreaded environment, the option concurrent should be
set to true. Note that the cache will always be concurrent if a background cleanup thread
is used.
"""
def __init__(self, max_size=1024, expiration=15*60, thread_clear=False, thread_clear_min_check=60, concurrent=False):
self.max_size = max_size
self.expiration = expiration
self.__values = {}
self.__expire_times = OrderedDict()
self.__access_times = OrderedDict()
self.thread_clear = thread_clear
self.concurrent = concurrent or thread_clear
if self.concurrent:
self._rlock = threading.RLock()
if thread_clear:
t = self.EmptyCacheThread(self)
t.start()
class EmptyCacheThread(threading.Thread):
daemon = True
def __init__(self, cache, peek_duration=60):
me = self
def kill_self(o):
me
self.ref = weakref.ref(cache)
self.peek_duration = peek_duration
super(LRUCacheDict.EmptyCacheThread, self).__init__()
def run(self):
while self.ref():
c = self.ref()
if c:
next_expire = c.cleanup()
if (next_expire is None):
time.sleep(self.peek_duration)
else:
time.sleep(next_expire+1)
c = None
@_lock_decorator
def size(self):
return len(self.__values)
@_lock_decorator
def clear(self):
"""
Clears the dict.
>>> d = LRUCacheDict(max_size=3, expiration=1)
>>> d['foo'] = 'bar'
>>> d['foo']
'bar'
>>> d.clear()
>>> d['foo']
Traceback (most recent call last):
...
KeyError: 'foo'
"""
self.__values.clear()
self.__expire_times.clear()
self.__access_times.clear()
def __contains__(self, key):
return self.has_key(key)
@_lock_decorator
def has_key(self, key):
"""
This method should almost NEVER be used. The reason is that between the time
has_key is called, and the key is accessed, the key might vanish.
You should ALWAYS use a try: ... except KeyError: ... block.
>>> d = LRUCacheDict(max_size=3, expiration=1)
>>> d['foo'] = 'bar'
>>> d['foo']
'bar'
>>> import time
>>> if d.has_key('foo'):
... time.sleep(2) #Oops, the key 'foo' is gone!
... d['foo']
Traceback (most recent call last):
...
KeyError: 'foo'
"""
return key in self.__values
@_lock_decorator
def __setitem__(self, key, value):
t = int(time.time())
self.__delitem__(key)
self.__values[key] = value
self.__access_times[key] = t
self.__expire_times[key] = t + self.expiration
self.cleanup()
@_lock_decorator
def __getitem__(self, key):
t = int(time.time())
del self.__access_times[key]
self.__access_times[key] = t
self.cleanup()
return self.__values[key]
@_lock_decorator
def __delitem__(self, key):
if key in self.__values:
del self.__values[key]
del self.__expire_times[key]
del self.__access_times[key]
@_lock_decorator
def cleanup(self):
if self.expiration is None:
return None
t = int(time.time())
#Delete expired
keys_to_delete = []
next_expire = None
for k in self.__expire_times:
if self.__expire_times[k] < t:
keys_to_delete.append(k)
else:
next_expire = self.__expire_times[k]
break
while keys_to_delete:
self.__delitem__(keys_to_delete.pop())
#If we have more than self.max_size items, delete the oldest
while (len(self.__values) > self.max_size):
keys_to_delete = []
for k in self.__access_times:
keys_to_delete.append(k)
break
while keys_to_delete:
self.__delitem__(keys_to_delete.pop())
if not (next_expire is None):
return next_expire - t
else:
return None
class LRUCachedFunction(object):
"""
A memoized function, backed by an LRU cache.
>>> def f(x):
... print("Calling f(" + str(x) + ")")
... return x
>>> f = LRUCachedFunction(f, LRUCacheDict(max_size=3, expiration=3) )
>>> f(3)
Calling f(3)
3
>>> f(3)
3
>>> import time
>>> time.sleep(4) #Cache should now be empty, since expiration time is 3.
>>> f(3)
Calling f(3)
3
>>> f(4)
Calling f(4)
4
>>> f(5)
Calling f(5)
5
>>> f(3) #Still in cache, so no print statement. At this point, 4 is the least recently used.
3
>>> f(6)
Calling f(6)
6
    >>> f(4) #No longer in cache - 4 was the least recently used item, so it was evicted when 6 was added.
Calling f(4)
4
"""
def __init__(self, function, cache=None):
if cache:
self.cache = cache
else:
self.cache = LRUCacheDict()
self.function = function
self.__name__ = self.function.__name__
def __call__(self, *args, **kwargs):
key = repr( (args, kwargs) ) + "#" + self.__name__ #In principle a python repr(...) should not return any # characters.
try:
return self.cache[key]
except KeyError:
value = self.function(*args, **kwargs)
self.cache[key] = value
return value
if __name__ == "__main__":
import doctest
doctest.testmod()
| stucchio/Python-LRU-cache | lru/__init__.py | Python | gpl-3.0 | 7,735 |
#!/usr/bin/python
import commons
from espeak import espeak
import mosquitto
import subprocess
from os import listdir
import random
from os.path import join
from twython import Twython
import ConfigParser
#import time
import moc
import math
from datetime import *
from pytz import timezone
import calendar
from dateutil.relativedelta import *
config = ConfigParser.ConfigParser()
config.read("config.ini")
CONSUMER_KEY = config.get("TWYTHON","CONSUMER_KEY")
CONSUMER_SECRET = config.get("TWYTHON","CONSUMER_SECRET")
ACCESS_KEY = config.get("TWYTHON","ACCESS_KEY")
ACCESS_SECRET = config.get("TWYTHON","ACCESS_SECRET")
api = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_KEY,ACCESS_SECRET)
rndspeak = ["seenee","hai","hallo","bip bip","robot","yuhu","ea","oi","we","oh","aah"]
folder = "/home/pi/cipi/sounds/"
files = listdir(folder)
plfolder = "/home/pi/cipi/playlist/"
playlist = listdir(plfolder)
language = "en"
musiclist = []
atime = commons.getmillis()
stoptime = commons.getmillis()
stoptimeb = commons.getmillis()
# start the moc server
try:
moc.start_server()
except:
pass
def dt(str):
r = datetime.strptime(str,"%Y-%m-%d")
return r
def get_cal():
f = file("agenda.txt","rb")
ap = f.readlines()
data = []
for dt in ap:
data.append(dt.split(" "))
return data
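# Clarifying note (assumed format): the event helpers below index each
# whitespace-split agenda line as dt[0]=start date, dt[1]=start time,
# dt[2]=end date, dt[3]=end time and dt[6]=event name, parsing dates with
# "%Y-%m-%d %H:%M". A matching agenda.txt line would therefore look roughly
# like (illustrative example):
#
#   2015-03-01 09:30 2015-03-01 10:00 x x standup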
#SPEAK EVENT FOR TODAY
def today_event():
today = datetime.today()
now = datetime.now()
for dt in mycal:
print dt
ev_dt=datetime.strptime(dt[0]+" "+dt[1],"%Y-%m-%d %H:%M")
evnt = dt[6]
if ev_dt.date() == today.date():
espeak.synth("dont forget to " + evnt +"\n" )
#COMPARE HALF HOUR EVENT
def event_reminder():
today = datetime.today()
now = datetime.now()
for dt in mycal:
ev_dt=datetime.strptime(dt[0]+" "+dt[1],"%Y-%m-%d %H:%M")
evnt = dt[6]
if ev_dt.date() == today.date():
if ev_dt > now:
intime = int(math.floor((ev_dt - now).seconds / 60))
if intime < 300:
data = evnt + ", event in " + str(intime) + " minutes"
espeak.synth(data)
def event_ongoing():
today = datetime.today()
now = datetime.now()
for dt in mycal:
ev_fr=datetime.strptime(dt[0]+" "+dt[1],"%Y-%m-%d %H:%M")
ev_to=datetime.strptime(dt[2]+" "+dt[3],"%Y-%m-%d %H:%M")
evnt = dt[6]
if ev_fr < now:
if ev_to > now:
data = "Do "+evnt+" now"
espeak.synth(data)
#RETRIEVE CALENDAR FROM GOOGLE CAL AND WRITE TO FILE
def retrieve_agenda():
try:
mycmd = "gcalget.sh"
subprocess.call(["sh",mycmd])
except:
espeak.synth("calendar error")
def parsemusic(dat):
f = file(dat,"r")
a = f.readlines()
try:
a.remove('\n')
except:
pass
return a
def on_connect(mosq, obj, rc):
mosq.subscribe("speak", 0)
mosq.subscribe("sound", 0)
mosq.subscribe("tweet", 0)
mosq.subscribe("teleop", 0)
mosq.subscribe("wii",0)
print("rc: "+str(rc))
def on_message(mosq, obj, msg):
global folder
global files
global language
global api
global stoptime
    # route messages from teleop/wii to the MOTOR topic
if msg.topic == "teleop":
try:
mqttc.publish("motor",msg.payload)
a = 0
except:
pass
return
if msg.topic == "wii":
try:
mqttc.publish("motor",msg.payload)
a = 0
except:
pass
#process topic tweet
if msg.topic == "tweet":
try:
api.update_status(status=str(msg.payload))
espeak.synth("Tweeted")
except:
espeak.synth("Tweet failed")
return
#process topic speak
if msg.topic == "speak":
#print(msg.topic+" "+str(msg.qos)+" "+str(msg.payload))
if msg.payload == "en2":
language = "en2"
return
elif msg.payload == "en":
language = "en"
return
elif msg.payload == "ko":
language = "ko"
return
elif msg.payload == "id":
language = "id"
return
elif msg.payload == "rnd":
espeak.synth(random.choice(rndspeak))
return
#incoming from wii
if msg.payload == "music start":
espeak.synth("play music")
musiclist = parsemusic(join(plfolder,random.choice(playlist)))
moc.quickplay(musiclist)
return
elif msg.payload == "music stop":
moc.stop()
espeak.synth("music stop")
return
elif msg.payload == "volume up":
moc.increase_volume(10)
return
elif msg.payload == "volume down":
moc.decrease_volume(10)
return
elif msg.payload == "next music":
moc.next()
return
elif msg.payload == "previous music":
moc.previous()
return
elif msg.payload == "toggle shuffle":
moc.toggle_shuffle()
return
elif msg.payload == "enable shuffle":
moc.enable_shuffle()
return
elif msg.payload == "disable shuffle":
moc.disable_shuffle()
return
elif msg.payload == "main++":
espeak.synth("run main")
commons.run_main()
return
elif msg.payload == "main--":
espeak.synth("kill main")
commons.kill_main()
return
elif msg.payload == "display++":
espeak.synth("display plus plus")
return
elif msg.payload == "display--":
espeak.synth("display minus minus")
return
elif msg.payload == "light++":
espeak.synth("light plus plus")
return
elif msg.payload == "light--":
espeak.synth("light minus minus")
return
elif msg.payload == "print++":
espeak.synth("print plus plus")
return
elif msg.payload == "print--":
espeak.synth("print minus minus")
return
if language == "en":
espeak.synth(msg.payload)
elif language == "ko":
subprocess.call(["/home/pi/cipi/speech_ko.sh",msg.payload])
elif language == "id":
subprocess.call(["/home/pi/cipi/speech_id.sh",msg.payload])
elif language == "en2":
subprocess.call(["/home/pi/cipi/speech_en.sh",msg.payload])
#process topic sound
if msg.topic == "sound":
if msg.payload == "rnd":
subprocess.call(["aplay",join(folder,random.choice(files))])
else:
subprocess.call(["aplay",msg.payload])
def on_publish(mosq, obj, mid):
pass
def on_subscribe(mosq, obj, mid, granted_qos):
print("Subscribed: "+str(mid)+" "+str(granted_qos))
def on_log(mosq, obj, level, string):
print(string)
# If you want to use a specific client id, use
# mqttc = mosquitto.Mosquitto("client-id")
# but note that the client id must be unique on the broker. Leaving the client
# id parameter empty will generate a random id for you.
mqttc = mosquitto.Mosquitto()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Uncomment to enable debug messages
#mqttc.on_log = on_log
mqttc.connect("127.0.0.1", 1883, 60)
#mqttc.subscribe("string", 0)
#mqttc.subscribe(("tuple", 1))
#mqttc.subscribe([("list0", 0), ("list1", 1)])
#Speak agenda
retrieve_agenda()
mycal = get_cal()
today_event()
while True:
mqttc.loop()
#loop reminder for every 5 minutes
btime = commons.getmillis()
if btime-atime > 300000:
atime = commons.getmillis()
event_reminder()
event_ongoing()
    # loop timer to stop the motor every 10 seconds / safety
stoptimeb = commons.getmillis()
if stoptimeb-stoptime > 5000:
stoptime = commons.getmillis()
mqttc.publish("motor","1:1:0:0:#")
| irzaip/cipi | cp_speak.py | Python | lgpl-3.0 | 7,663 |
from Application import app
app.run(debug=True) | MishaGarbuz/WinVault | app.py | Python | mit | 48 |
# -*- coding: utf-8 -*-
#from django_tables2.utils import A # alias for Accessor
#from django.db.models import Sum
from django_tables2_reports.tables import TableReport
import django_tables2 as tables
from core.models import *
from seafood.models import *
from genotype.models import *
class ObTable(TableReport):
fields = []
values = []
sterm = ''
class Meta:
attrs = {"class": "paleblue"}
exclude = ("obkeywords", "search_index", "values", )
class SpeciesTable(TableReport):
class Meta:
attrs = {"class": "paleblue"}
class CategoryTable(TableReport):
sterm = None
class Meta:
attrs = {"class": "paleblue"}
exclude = ("obkeywords", "search_index", )
class FishTable(ObTable):
class Meta(ObTable.Meta):
model = Fish
| hdzierz/Kaka | web/tables.py | Python | gpl-2.0 | 818 |
# -*- coding: utf-8 -*-
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0047_restaurant_tags'),
]
operations = [
migrations.CreateModel(
name='ImportantPages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('general_terms_page', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
('privacy_policy_page', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
('sign_up_page', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
],
options={
'abstract': False,
},
),
]
| zerolab/wagtail | wagtail/tests/testapp/migrations/0048_importantpages.py | Python | bsd-3-clause | 1,135 |
#! /usr/bin/env python3
"""
pcom.py test cases
"""
import os
import unittest
import shutil
from utils import pcom
class TestGenCfg(unittest.TestCase):
"""test case class for gen_cfg function"""
def setUp(self):
self.tmp_cfg1_file = "/tmp/test_pcom1.cfg"
self.tmp_cfg2_file = "/tmp/test_pcom2.cfg"
cfg1_str = """\
[section1]
key1 = val1, val2
key2 = val3,
val4
[section2]
key3 = val5
val6
key4 = val7=val8
[section3]
key5 =
# key6
key7
"""
cfg2_str = """\
[section3]
key7 = val9
key8
[section4]
key9 = `~!@#$%%^&*()-_=+[{]}|;:'",<.>/?
"""
with open(self.tmp_cfg1_file, "w") as tc1f:
tc1f.write(cfg1_str)
with open(self.tmp_cfg2_file, "w") as tc2f:
tc2f.write(cfg2_str)
def test_gen_cfg(self):
"""test case"""
cfg = pcom.gen_cfg([self.tmp_cfg1_file, self.tmp_cfg2_file])
self.assertEqual(cfg["section1"]["key1"], "val1, val2")
self.assertEqual(cfg["section1"]["key2"], f"val3,{os.linesep}val4")
self.assertEqual(cfg["section2"]["key3"], f"val5{os.linesep}val6")
self.assertEqual(cfg["section2"]["key4"], "val7=val8")
self.assertEqual(cfg["section3"]["key5"], "")
self.assertNotIn("key6", cfg["section3"])
self.assertEqual(cfg["section3"]["key7"], "val9")
self.assertEqual(cfg["section3"]["key8"], None)
self.assertEqual(cfg["section4"]["key9"], """`~!@#$%^&*()-_=+[{]}|;:'",<.>/?""")
def tearDown(self):
os.remove(self.tmp_cfg1_file)
os.remove(self.tmp_cfg2_file)
del self.tmp_cfg1_file
del self.tmp_cfg2_file
class TestFindIter(unittest.TestCase):
"""test case for find_iter function"""
def setUp(self):
self.base_dir = "/tmp/test_pcom"
os.makedirs(self.base_dir)
def test_find_iter(self):
"""test case"""
test_dir_tup = (
f"{self.base_dir}{os.sep}test1",
f"{self.base_dir}{os.sep}test2",
f"{self.base_dir}{os.sep}test3")
test_tup = (
f"{self.base_dir}{os.sep}test.log",
f"{self.base_dir}{os.sep}test.txt",
f"{self.base_dir}{os.sep}test.cfg")
test1_tup = (
f"{test_dir_tup[0]}{os.sep}test1.log",
f"{test_dir_tup[0]}{os.sep}test1.txt",
f"{test_dir_tup[0]}{os.sep}test1.cfg")
test2_tup = (
f"{test_dir_tup[1]}{os.sep}test2.log",
f"{test_dir_tup[1]}{os.sep}test2.txt",
f"{test_dir_tup[1]}{os.sep}test2.cfg")
test3_tup = (
f"{test_dir_tup[2]}{os.sep}test3.log",
f"{test_dir_tup[2]}{os.sep}test3.txt",
f"{test_dir_tup[2]}{os.sep}test3.cfg")
test_test_dir_tup = tuple(f"{cc}{os.sep}test" for cc in test_dir_tup)
for test_dir in test_dir_tup+test_test_dir_tup:
os.makedirs(test_dir)
for test_file in test_tup+test1_tup+test2_tup+test3_tup:
open(test_file, "w").close()
self.assertEqual(
set(pcom.find_iter(self.base_dir, "*.log")),
{test_tup[0], test1_tup[0], test2_tup[0], test3_tup[0]})
self.assertEqual(
set(pcom.find_iter(self.base_dir, "*test1*")),
set(test1_tup))
self.assertEqual(
set(pcom.find_iter(self.base_dir, "*.log", True)),
set())
self.assertEqual(
set(pcom.find_iter(self.base_dir, "*test*", True)),
set(test_dir_tup+test_test_dir_tup))
self.assertEqual(
set(pcom.find_iter(self.base_dir, "*.log", cur_flg=True)),
{test_tup[0]})
self.assertEqual(
set(pcom.find_iter(test_dir_tup[0], "*.log", cur_flg=True)),
{test1_tup[0]})
self.assertEqual(
set(pcom.find_iter(self.base_dir, "*test*", True, True)),
set(test_dir_tup))
def tearDown(self):
shutil.rmtree(self.base_dir)
del self.base_dir
if __name__ == "__main__":
unittest.main()
| cmos3511/cmos_linux | python/op/op/tests/test_pcom.py | Python | gpl-3.0 | 4,016 |
# -*- coding:utf-8 -*-
'''
Create an unified test_stub for E2E test operations
@author: Legion
'''
import os
import re
import time
import types
import random
import socket
import xml.dom.minidom as minidom
from os.path import join
from zstackwoodpecker.e2e_lib import E2E
import zstackwoodpecker.operations.resource_operations as res_ops
from zstackwoodpecker import test_util
import zstacklib.utils.jsonobject as jsonobject
LOG_FILE_PATH = '/root/.zstackwoodpecker/integrationtest/vm/e2e_mini/'
MESSAGETOAST = 'ant-notification-notice-message'
CARDCONTAINER = 'ant-card|ant-table-row'
MODALCONTENT = 'ant-modal-content'
PRIMARYBTN = 'ant-btn-primary'
BTN = 'ant-btn'
CONFIRMBTN = '#modal-btn-confirm'
CANCELBTN = '#modal-btn-cancel'
CRTADDBTN = '#btn-create|#btn-add'
EXITBTN = 'ant-modal-close-x'
ICONBTN = 'iconButton___2NyZB'
TABLEROW = 'ant-table-row ant-table-row-level-0'
CHECKBOX = 'input[type="checkbox"]'
FORMEXPLAIN = 'ant-form-explain'
SPINCONTAINER = 'ant-spin-container'
CONFIRMITEM = 'confirmItem___1ZEQE'
MENUSETTING = '#avatar'
OPS_ONGOING = '#operationhint_ongoing'
OPS_SUCCESS = '#operationhint_success'
OPS_FAIL = '#operationhint_fail'
ANTITEM = 'ant-dropdown-menu-item|ant-menu-item'
INPUTROW = 'ant-row ant-form-item'
PRIMARYBTNNUM = 2
MENUDICT = {'homepage': 'a[href="/web/"]',
'monitor': 'a[href="/web/monitoringCenter"]',
'vm': 'a[href="/web/vm"]',
'minihost': 'a[href="/web/minihost"]',
'ps': 'a[href="/web/primaryStorage"]',
'volume': 'a[href="/web/volume"]',
'image': 'a[href="/web/image"]',
'network': 'a[href="/web/network"]',
'alarm': 'a[href="/web/alarmMessage"]',
'eip': 'a[href="/web/eip"]',
'log': 'a[href="/web/operationLog"]'}
def get_mn_ip():
if os.getenv('ZSTACK_SIMULATOR') == "yes":
# Just get the local ip
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return str(ip), ""
dom_path = '/'.join(os.getcwd().split('/')[:3]) + "/scenario-file.xml"
dom = minidom.parse(dom_path)
root = dom.documentElement
item_list = root.getElementsByTagName('vm')
first_mn_ip = item_list[0].getAttribute('ip')
second_mn_ip = item_list[1].getAttribute('ip')
return str(first_mn_ip), str(second_mn_ip)
def get_time_postfix():
rand_postfix = str(random.random()).split('.')[-1]
return time.strftime('%y%m%d-%H%M%S', time.localtime()) + '-' + rand_postfix
def get_inv(name, res_type):
res_dict = {'vm': res_ops.VM_INSTANCE,
'volume': res_ops.VOLUME,
'minihost': res_ops.CLUSTER,
'network': res_ops.L3_NETWORK,
'image': res_ops.IMAGE,
'primaryStorage': res_ops.PRIMARY_STORAGE,
'eip': res_ops.EIP,
'backup': res_ops.VOLUME_BACKUP}
conditions = res_ops.gen_query_conditions('name', '=', name)
inv = res_ops.query_resource(res_dict[res_type], conditions)
if inv:
return inv[0]
else:
test_util.test_fail('Can not find the [%s] with name [%s]' % (res_type, name.encode('utf-8')))
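# Illustrative use of get_inv() (the resource name below is an example only):
#
#   vm_inv = get_inv(u'test-vm', 'vm')   # queries res_ops.VM_INSTANCE by name
#   uuid = vm_inv.uuid                   # e.g. used to build detail-page links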
class MINI(E2E):
def __init__(self):
super(MINI, self).__init__()
if os.getenv('ZSTACK_SIMULATOR'):
self.mini_server_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
else:
self.mini_server_ip = os.getenv('zstackHaVip')
self.url('http://%s:8200' % self.mini_server_ip)
self.window_size(1600, 900)
self.login()
def login(self, password='password'):
test_util.test_logger('Log in normally')
self.get_element('#account').input('admin')
self.get_element('#password').input(password)
self.get_element('#btn-login').click()
time.sleep(1)
def logout(self):
test_util.test_logger('Log out')
self.get_element(MENUSETTING).move_cursor_here()
        self.get_element('#nav-logout').click()
if not self.wait_for_element('#btn-login'):
test_util.test_fail('Failed to Logout')
def change_mini_password(self, password='123456'):
test_util.test_logger('Change the MINI password to [%s]' % password)
self.get_element(MENUSETTING).move_cursor_here()
self.get_element('#nav-password').click()
self.get_element('#newPassword').input(password)
self.get_element('#confirmPassword').input(password)
self.click_ok()
def login_with_cleartext_password(self):
test_util.test_logger('Log in with clear-text password')
self.get_element('#account').input('admin')
passwordInput = self.get_element('#password')
assert passwordInput.get_attribute('type') == 'password'
self.get_element('ant-input-suffix').click()
passwordInput.input('password')
assert passwordInput.get_attribute('type') == 'text'
self.get_element('#btn-login').click()
def login_without_account_or_password(self, with_account=False, with_password=False):
test_util.test_logger('Log in without account or password')
if with_account:
self.get_element('#account').input('admin')
if with_password:
self.get_element('#password').input('password')
# Login button
self.get_element('#btn-login').click()
time.sleep(1)
# check
if not with_account or not with_password:
self.wait_for_element(FORMEXPLAIN)
if not with_account and with_password:
assert self.get_element(FORMEXPLAIN).text == u'请输入账户名'
elif with_account and not with_password:
assert self.get_element(FORMEXPLAIN).text == u'请输入密码'
elif not with_account and not with_password:
assert self.get_elements(FORMEXPLAIN)[0].text == u'请输入账户名'
assert self.get_elements(FORMEXPLAIN)[1].text == u'请输入密码'
def login_with_wrong_account_or_password(self, wrong_account=True, wrong_password=True):
test_util.test_logger('Log in with wrong account or password')
if wrong_account:
self.get_element('#account').input('wrongadmin')
else:
self.get_element('#account').input('admin')
if wrong_password:
self.get_element('#password').input('wrongpassword')
else:
self.get_element('#password').input('password')
# Login button
self.get_element('#btn-login').click()
self.wait_for_element(FORMEXPLAIN)
assert self.get_element(FORMEXPLAIN).text == u'账户名或密码错误'
def navigate(self, menu):
current_url = self.get_url()
if menu not in current_url.split('/')[-1]:
test_util.test_logger('Navigate to [%s]' % menu)
self.get_element(MENUDICT[menu]).click()
self.wait_for_element(PRIMARYBTN)
time.sleep(2)
if menu == "image":
pattern = re.compile(u'(镜像仓库剩余容量)\s\d+\.?\d*\s(GB,总容量)\s\d+\.?\d*\s(GB)')
if re.search(pattern, self.get_elements("ant-row-flex-space-between")[0].text) is None:
test_util.test_fail("Err: page image is not fully loaded")
def switch_view(self, view='card'):
if view == 'card':
btn_id = '#btn-cardview'
else:
btn_id = '#btn-tableview'
test_util.test_logger('Switch view mode to [%s]' % view.capitalize())
self.get_element(btn_id).click()
time.sleep(1)
def switch_tab(self, tab_name):
tab_id = '#nav-%s|#tab-%s' % (tab_name, tab_name)
test_util.test_logger('Switch to tab [%s]' % tab_name.capitalize())
self.wait_for_page_render()
self.get_element(tab_id).click()
time.sleep(1)
def switch_radio_btn(self, btn_name):
test_util.test_logger('Switch to radio btn [%s]' % btn_name.encode('utf-8'))
self.wait_for_page_render()
for btn in self.get_elements('ant-radio-button-wrapper'):
if btn_name in btn.text:
btn.click()
break
else:
test_util.test_fail("Fail to switch the radio btn to %s" % btn_name.encode('utf-8'))
time.sleep(1)
def operate(self, name):
test_util.test_logger('Execute operation [%s]' % name.encode('utf-8'))
_elem = self.get_elements(VMACTIONSCONTAINER)
for op in _elem[0].get_elements('span', 'tag name') if _elem else self.get_elements(ANTITEM):
if op.enabled and op.text == name:
op.click()
time.sleep(1)
return True
def click_ok(self, assure_success=True, not_check=False):
test_util.test_logger('Click OK button')
self.wait_for_page_render()
self.get_element(CONFIRMBTN).click()
if not_check:
time.sleep(1)
return
if not self.wait_for_element(MESSAGETOAST):
test_util.test_fail('Fail: No Message Toast Appeared After Clicking OK')
if assure_success:
if self.get_elements(OPS_ONGOING):
if self.wait_for_element(OPS_ONGOING, timeout=300, target='disappear'):
while 1:
if self.get_elements(OPS_FAIL):
test_util.test_fail("Fail: Operation Failed!")
elif self.get_elements(OPS_SUCCESS):
break
else:
test_util.test_fail("Fail: Operation Timeout!")
elif self.get_elements(OPS_FAIL):
test_util.test_fail("Fail: Operation Failed!")
self.wait_for_element(MESSAGETOAST, timeout=300, target='disappear')
time.sleep(1)
def click_cancel(self):
test_util.test_dsc('Click cancel button')
self.get_element(CANCELBTN).click()
if not self.wait_for_element(MODALCONTENT, target='disappear'):
test_util.test_fail("Fail to click cancel btn")
def click_close(self):
test_util.test_logger('Click close button')
self.get_element(EXITBTN).click()
if not self.wait_for_element(MODALCONTENT, target='disappear'):
test_util.test_fail("Fail to click close btn")
    def more_operate(self, op_name, res_name, res_type=None, details_page=False):
test_util.test_logger('Start more operate')
res_list = []
self.wait_for_element(CARDCONTAINER)
if isinstance(res_name, types.ListType):
res_list = res_name
else:
res_list.append(res_name)
if details_page:
if len(res_list) == 1:
self.enter_details_page(res_type, res_list[0])
else:
test_util.test_fail('Multiple resource can not enter details page together')
else:
for res in res_list:
elem = self.get_res_element(res)
time.sleep(1)
if not elem.get_element(CHECKBOX).selected:
test_util.test_logger('Select [%s]' % res.encode('utf-8'))
elem.get_element(CHECKBOX).click()
self.click_button('more')
time.sleep(1)
        self.click_button(op_name)
test_util.test_logger('Finish more operate')
def enter_details_page(self, res_type, name):
inv = get_inv(name, res_type)
lnk = 'a[href="/web/%s/%s"]' % (res_type, inv.uuid)
test_util.test_logger('Enter into details page')
self.get_element(lnk).click()
time.sleep(1)
def cancel_create_operation(self, res_type, close=False):
test_util.test_logger('Cancel create operation of %s' % res_type)
self.navigate(res_type)
self.get_elements(PRIMARYBTN)[-1].click()
time.sleep(1)
if close:
self.click_close()
else:
self.click_cancel()
def cancel_more_operation(self, op_name, res_name, res_type, details_page=False, close=False):
test_util.test_logger('Cancel more operation [%s] of %s' % (op_name.encode('utf-8'), res_type))
self.navigate(res_type)
self.more_operate(op_name, res_name, res_type, details_page)
if close:
self.click_close()
else:
self.click_cancel()
def create(self, para_dict, res_type, view, priority_dict=None):
self.navigate(res_type)
self.get_element(CRTADDBTN).click()
time.sleep(1)
for _elem in self.get_element(MODALCONTENT).get_elements('span', 'tag name'):
if _elem.text == u'高级':
_elem.click()
break
if priority_dict:
for k, v in priority_dict.iteritems():
if v is not None:
self.input(k, v)
for k, v in para_dict.iteritems():
if v is not None:
self.input(k, v)
self.click_ok(not_check=True)
if not self.wait_for_element(MODALCONTENT, timeout=3, target='disappear'):
if self.wait_for_element(FORMEXPLAIN, timeout=3):
for elem in self.get_elements(FORMEXPLAIN):
test_util.test_logger('Error:' + elem.text.encode('utf-8'))
test_util.test_fail('Create Error: check the previous error message')
self.wait_for_element(MESSAGETOAST)
if self.wait_for_element(OPS_ONGOING, timeout=300, target='disappear'):
while 1:
if self.get_elements(OPS_FAIL):
test_util.test_fail("Fail: Operation Failed!")
elif self.get_elements(OPS_SUCCESS):
break
else:
test_util.test_fail("Fail: Operation Timeout!")
self.wait_for_element(MESSAGETOAST, timeout=300, target='disappear')
self.switch_view(view)
elem = self.get_res_element(para_dict['name'])
return elem
def delete(self, res_name, res_type, view, corner_btn=False, expunge=False, details_page=False, del_vol=False):
isExpunge = False
res_list = []
if isinstance(res_name, types.ListType):
res_list = res_name
if len(res_list) > 1 and (corner_btn or details_page):
test_util.test_fail("The args 'corner_btn' and 'details_page' are not for batch operation")
else:
res_list.append(res_name)
if corner_btn and details_page:
test_util.test_fail("The args 'corner_btn' and 'details_page' can not be both True")
self.navigate(res_type)
self.switch_view(view)
primary_btn_num = len(self.get_elements(PRIMARYBTN))
test_util.test_logger('%s %s [name: (%s)]' % (('Expunge' if primary_btn_num < PRIMARYBTNNUM else 'Delete'), res_type, ' '.join(res_list).encode('utf-8')))
for res in res_list:
_elem = self.get_res_element(res)
if corner_btn:
_elem.get_elements(ICONBTN)[-1].click()
break
elif expunge and (primary_btn_num < PRIMARYBTNNUM):
isExpunge = True
if details_page:
self.more_operate(op_name=u'彻底删除', res_type=res_type, res_name=res_list, details_page=details_page)
break
else:
_elem.get_element(CHECKBOX).click()
else:
self.more_operate(op_name=u'删除', res_type=res_type, res_name=res_list, details_page=details_page)
self.check_confirm_item(res_list)
break
else:
self.click_button(u'彻底删除')
self.check_confirm_item(res_list)
if del_vol:
vol_check = self.get_element('#deleteVolume')
if vol_check:
vol_check.click()
self.click_ok()
if details_page:
self.navigate(res_type)
if res_type not in ['network', 'eip']:
            self.switch_tab('deleted')
if isExpunge:
self.check_res_item(res_list, target='notDisplayed')
return True
if res_type not in ['network', 'eip']:
# check deleted
self.check_res_item(res_list)
else:
self.check_res_item(res_list, target='notDisplayed')
if expunge:
self.delete(res_list, res_type, view=view, expunge=True, details_page=details_page)
def resume(self, res_name, res_type, view='card', details_page=False):
res_list = []
if isinstance(res_name, types.ListType):
res_list = res_name
if len(res_list) > 1 and details_page:
test_util.test_fail("The args 'details_page' are not for batch operation")
else:
res_list.append(res_name)
self.navigate(res_type)
self.switch_tab('deleted')
self.switch_view(view)
test_util.test_logger('Resume %s [name: (%s)]' % (res_type, ' '.join(res_list)))
for res in res_list:
_elem = self.get_res_element(res)
if details_page:
self.more_operate(op_name=u'恢复',
res_type=res_type,
res_name=res_list,
details_page=details_page)
self.click_ok()
break
else:
_elem.get_element(CHECKBOX).click()
else:
self.click_button('recovery')
self.click_ok()
self.wait_for_element(MESSAGETOAST, timeout=30, target='disappear')
self.navigate(res_type)
self.switch_tab('available')
self.check_res_item(res_list)
def input(self, label, content):
css_selector = 'label[for="%s"]' % label
selection_rendered = 'ant-select-selection__rendered'
radio_group = 'ant-radio-group'
title = None
def select_opt(elem, opt_value):
opt_list = []
elem.get_element(selection_rendered).click()
time.sleep(1)
if not isinstance(opt_value, types.ListType):
opt_list.append(opt_value)
else:
opt_list = opt_value
for _opt in opt_list:
for opt in self.get_elements('li[role="option"]'):
if opt.displayed() and _opt in opt.text:
opt.click()
break
def select_radio(elem, value):
for opt in self.get_elements('input[type="radio"]'):
if value == opt.get_attribute('value'):
opt.click()
def input_content(elem, content):
element = elem.get_element('input', 'tag name')
element.clear()
if content != '':
element.input(content)
def textarea_content(elem, content):
element = elem.get_element('textarea', 'tag name')
element.clear()
if content != '':
element.input(content)
for _ in range(20):
elems = self.get_elements(INPUTROW)
if elems:
break
else:
time.sleep(0.5)
else:
test_util.test_fail('Can not find elements with selector: [%s]' % INPUTROW)
for elem in self.get_elements(INPUTROW):
title_elem = elem.get_elements(css_selector)
if title_elem:
title = title_elem[0].text.encode('utf-8')
break
if isinstance(content, types.IntType):
content = str(content)
if isinstance(content, types.ListType) and content[0].isdigit():
test_util.test_logger('Input [%s] for [%s]' % (content[0].encode('utf-8'), title))
test_util.test_logger('Select [%s] for [%s]' % (content[1].encode('utf-8'), title))
input_content(elem, content[0])
select_opt(elem, content[1])
else:
if elem.get_elements(selection_rendered):
opt_list = []
if not isinstance(content, types.ListType):
opt_list.append(content)
else:
opt_list = content
test_util.test_logger('Select [%s] for [%s]' % (', '.join(opt_list).encode('utf-8'), title))
select_opt(elem, opt_list)
elif elem.get_elements(radio_group):
test_util.test_logger('Select [%s] for [%s]' % (content.encode('utf-8'), title))
select_radio(elem, content)
elif elem.get_elements('textarea[id="description"]'):
test_util.test_logger('Input [%s] for [%s]' % (content.encode('utf-8'), title))
textarea_content(elem, content)
else:
test_util.test_logger('Input [%s] for [%s]' % (content.encode('utf-8'), title))
input_content(elem, content)
def get_res_element(self, res_name):
test_util.test_logger('Get the element [%s]' % res_name.encode('utf-8'))
for _elem in self.get_elements(CARDCONTAINER):
if res_name in _elem.text:
return _elem
test_util.test_fail('Can not find [%s]' % res_name.encode('utf-8'))
def get_table_row(self, res_list):
for res in res_list:
for _row in self.get_elements(TABLEROW):
if res in _row.text.encode('utf-8'):
_row.get_element(CHECKBOX).click()
break
else:
test_util.test_fail('Can not find the res with name [%s]' % res)
def get_detail_info(self, res_name, res_type, info_name):
test_util.test_logger('Get the detail info of [%s] with info name [%s]' % (res_name, info_name.encode('utf-8')))
self.enter_details_page(res_type, res_name)
for elem in self.get_elements("cardField___2SE_p"):
if info_name in elem.text:
info = elem.get_element('ant-typography-ellipsis').text
self.go_backward()
self.wait_for_page_render()
return info
test_util.test_fail('Can not find the detail info of [%s] with info name [%s]' % (res_name, info_name.encode('utf-8')))
def check_res_item(self, res_list, target='displayed'):
test_util.test_logger('Check if %s %s' % (res_list, target))
if not isinstance(res_list, types.ListType):
test_util.test_fail("The first parameter of function[check_res_item] expected list_type")
for res in res_list:
expected = '[%s] is expected to be [%s]!' % (res, target)
all_res_text = self.get_elements(SPINCONTAINER)[-1].text
if target == 'displayed':
assert res in all_res_text, expected
else:
assert res not in all_res_text, expected
test_util.test_logger('%s %s, check Pass' % (res_list, target))
def check_confirm_item(self, res_list):
test_util.test_logger('Check if %s confirmed' % res_list)
self.wait_for_element(MODALCONTENT)
confirm_items = self.get_elements(CONFIRMITEM)
for res in res_list:
for item in confirm_items:
if res == item.text:
break
else:
test_util.test_fail('%s should to be confirmed' % res)
def check_menu_item_disabled(self, name, res_type, btn_id):
self.navigate(res_type)
elem = self.get_res_element(name)
if not elem.get_element(CHECKBOX).selected:
elem.get_element(CHECKBOX).click()
self.click_button('more')
assert 'disabled' in self.get_element(btn_id).get_attribute('class')
self.get_element(btn_id).click()
assert not self.get_elements(btn_id)
def check_browser_console_log(self):
errors = []
logs = self.get_console_log()
for log in logs:
if log.level == 'SEVERE':
                errors.append(log.message)
if errors:
if os.getenv('FailWhenConsoleError'):
test_util.test_fail("Browser console errors: %s" % errors)
else:
test_util.test_logger("Browser console errors: %s" % errors)
def wait_for_page_render(self):
for _ in xrange(10):
# Refresh button element: get_elements(PRIMARYBTN)[0]
if self.get_elements(PRIMARYBTN):
if self.get_elements(PRIMARYBTN)[0].get_attribute('disabled') == 'true':
test_util.test_logger('The page rendering is not finished, check again')
time.sleep(0.5)
else:
time.sleep(1)
return True
else:
time.sleep(1)
def end_action(self, action_name):
if action_name == 'confirm':
self.click_ok()
elif action_name == 'cancel':
self.click_cancel()
elif action_name == 'close':
self.click_close()
def update_info(self, res_type, res_name, new_name, new_dsc=None, corner_btn=False, details_page=False, view='card'):
if res_type == 'host' and corner_btn:
test_util.test_fail('Host do not support to update info by corner btn.')
if res_type == 'minihost' and not details_page:
test_util.test_fail('Cluster only support to update info by details_page.')
check_list = []
if res_type == 'host':
self.navigate('minihost')
else:
self.navigate(res_type)
self.switch_view(view)
if res_type == 'host':
for elem in self.get_elements('ant-row-flex-middle'):
if res_name in elem.text:
if not details_page:
elem.get_element(CHECKBOX).click()
else:
elem.get_element('left-part').click()
time.sleep(1)
break
self.click_button('more')
time.sleep(1)
self.operate(u'修改信息')
else:
_elem = self.get_res_element(res_name)
if corner_btn:
_elem.get_elements(ICONBTN)[1].click()
else:
self.more_operate(u'修改信息', res_name=res_name, res_type=res_type, details_page=details_page)
if new_name is not None:
test_util.test_logger('Update the name of [%s] to %s' % (res_name, new_name))
self.input('name', new_name)
check_list.append(new_name)
if new_dsc is not None:
test_util.test_logger('Update the dsc of [%s] to %s' % (res_name, new_dsc))
self.input('description', new_dsc)
self.click_ok()
def search(self, value, search_by=u'名称', res_type='vm', tab_name='available', not_null=False):
test_util.test_logger('Search %s by %s' % (value.encode('utf-8'), search_by.encode('utf-8')))
self.navigate(res_type)
self.switch_tab(tab_name)
self.get_element('ant-input-group-addon').click()
self.wait_for_element('ul[role="listbox"]')
time.sleep(1)
for op in self.get_elements('li[role="option"]'):
if op.text == search_by:
op.click()
break
else:
test_util.test_fail("Failed: Search by [%s] is not supported" % search_by.encode('utf-8'))
self.get_element('ant-input').clear()
# u'\ue007' means sending enter key
self.get_element('ant-input').input(u'\ue007')
self.wait_for_page_render()
self.get_element('ant-input').input(value + u'\ue007')
self.wait_for_page_render()
# check
for _elem in self.get_elements(CARDCONTAINER):
assert value.lower() in _elem.text.lower()
res_num = len(self.get_elements(CARDCONTAINER))
if not_null:
assert res_num > 0
for tab in self.get_elements('ant-tabs-tab'):
if tab_name in tab.text:
assert str(res_num) in tab.text
def upgrade_capacity(self, name, res_type, new_capacity, details_page=False):
self.navigate(res_type)
capacity = self.get_detail_info(name, res_type, u'容量')
test_util.test_logger('Upgrade system capacity of [%s] from %s to %s' % (name, capacity, new_capacity))
if res_type == 'vm':
self.more_operate(u'系统扩容', res_type=res_type, res_name=name, details_page=details_page)
elif res_type == 'volume':
self.more_operate(u'数据盘扩容', res_type=res_type, res_name=name, details_page=details_page)
self.input('dataSize', new_capacity.split())
self.click_ok()
capacity = self.get_detail_info(name, res_type, u'容量')
if capacity != new_capacity:
test_util.test_fail("Failed to upgrade capacity of [%s] to %s" % (name, new_capacity))
def create_backup(self, name, res_type, backup_name, backup_dsc=None, end_action='confirm'):
test_util.test_logger('%s[%s] create backup[%s]' % (res_type.upper(), name, backup_name))
self.navigate(res_type)
self.enter_details_page(res_type, name)
self.switch_tab('backup')
self.get_element('#btn-operation-sub').click()
self.click_button('createBackup')
self.input('name', backup_name)
if backup_dsc is not None:
self.input('description', backup_dsc)
self.end_action(end_action)
if end_action == 'confirm':
backup_list = []
backup_list.append(backup_name)
self.check_res_item(backup_list)
def delete_backup(self, name, res_type, backup_name, end_action='confirm'):
backup_list = []
if isinstance(backup_name, types.ListType):
backup_list = backup_name
else:
backup_list.append(backup_name)
test_util.test_logger('%s[%s] delete backup (%s)' % (res_type.upper(), name, ' '.join(backup_list)))
self.navigate(res_type)
self.enter_details_page(res_type, name)
self.switch_tab('backup')
self.get_table_row(backup_list)
self.get_element('#btn-operation-sub').click()
self.click_button('deleteBackup')
self.end_action(end_action)
if end_action == 'confirm':
self.check_res_item(backup_list, target='notDisplayed')
def restore_backup(self, name, res_type, backup_name, end_action='confirm'):
test_util.test_logger('%s[%s] restore backup[%s]' % (res_type.upper(), name, backup_name))
self.navigate(res_type)
checker = MINICHECKER(self, self.get_res_element(name))
if res_type == 'vm':
checker.vm_check(ops='stop')
self.enter_details_page(res_type, name)
self.switch_tab('backup')
backup_list = []
backup_list.append(backup_name)
self.get_table_row(backup_list)
self.get_element('#btn-operation-sub').click()
self.click_button('revertBackup')
self.end_action(end_action)
image_name = 'for-recover-volume-from-backup-' + str(get_inv(backup_name, 'backup').uuid)
image_list = []
image_list.append(image_name)
if res_type == 'vm':
self.navigate('image')
self.check_res_item(image_list)
def get_err_log(self, filename=None):
filename = '.'.join(['err_log-' + get_time_postfix(), 'tmpt']) if filename is None else filename
test_util.test_logger("Get err log")
self.navigate('log')
next_btn = self.get_elements("button", "tag name")[-1]
while next_btn.enabled:
for line in self.get_elements('ant-table-row-level-0'):
if u"失败" in line.text:
arrow = line.get_element('anticon-down')
arrow.click()
for log_content in self.get_elements('ant-table-expanded-row-level-1'):
if log_content.displayed():
log_body = log_content.get_element('body___2c2z6')
break
try:
with open(join(LOG_FILE_PATH, filename), 'ab') as f:
f.write(line.text.encode("utf-8"))
f.write("\n")
f.write(log_body.text.encode("utf-8"))
f.write("\n----------*----------\n")
except IOError:
test_util.test_fail("Fail: IOError")
arrow.click()
next_btn.click()
def check_about_page(self):
test_util.test_logger('Check about page')
self.get_element(MENUSETTING).move_cursor_here()
self.click_button('about')
mn_ip_list = []
mn_ip_1, mn_ip_2 = get_mn_ip()
mn_ip_list.append(mn_ip_1)
mn_ip_list.append(mn_ip_2)
ha_vip = os.getenv('zstackHaVip')
assert ha_vip == self.get_elements("ipAddress___japHh")[0].text
for elem in self.get_elements("ipAddress___japHh")[1:]:
assert elem.text in mn_ip_list
def check_config_page(self):
test_util.test_logger('Check config page')
self.get_element(MENUSETTING).move_cursor_here()
self.click_button('setting')
self.click_button('add')
self.wait_for_element('name')
self.click_cancel()
self.switch_tab('server')
self.click_button('add')
self.wait_for_element('name')
# if don't wait 1s, will fail to click close btn
time.sleep(1)
self.click_close()
def save_element_location(self, filename=None):
filename = '.'.join(['elem_location-' + get_time_postfix(), 'tmpt']) if filename is None else filename
for menu, page in MENUDICT.items():
loc = {}
loc[menu] = self.get_element(page).location
json_loc = jsonobject.dumps(loc)
try:
with open(join(LOG_FILE_PATH, filename), 'ab') as f:
f.write(json_loc)
except IOError:
test_util.test_fail("Fail: IOError")
def enabled_status_checker(self):
self.navigate('vm')
btn_elems = self.get_elements('button', 'tag name')
start_btn = [btn_elem for btn_elem in btn_elems if btn_elem.text == u'启动'][0]
stop_btn = [btn_elem for btn_elem in btn_elems if btn_elem.text == u'停止'][0]
assert start_btn.enabled == False
        assert stop_btn.enabled == False
vm_elems = self.get_elements(CARDCONTAINER)
vm_checkboxs = self.get_elements(CHECKBOX)
# the checkboxs clicked will detach to the page document
def update_checkboxs():
return self.get_elements(CHECKBOX)
assert len(vm_elems) == len(vm_checkboxs)
vm_checkboxs[0].click()
vm_checkboxs = update_checkboxs()
assert vm_checkboxs[0].selected == True
first_vm_status = vm_elems[0].get_elements('labelContainer___10VVH')[0].text
if first_vm_status == u"运行中":
assert start_btn.enabled == False
            assert stop_btn.enabled == True
elif first_vm_status == u"已停止":
assert start_btn.enabled == True
assert stop_btn.enabled == False
if len(vm_elems) > 1:
vm_checkboxs = update_checkboxs()
vm_checkboxs[1].click()
vm_checkboxs = update_checkboxs()
assert vm_checkboxs[0].selected == True
assert vm_checkboxs[1].selected == True
second_vm_status = vm_elems[1].get_elements('labelContainer___10VVH')[0].text
if first_vm_status != second_vm_status:
assert start_btn.enabled == False
assert stop_btn.enabled == False
return True
class MINICHECKER(object):
def __init__(self, obj, elem):
self.obj = obj
self.elem = elem
        test_util.test_logger('Element text to check:\n%s' % elem.text.encode('utf-8'))
def vm_check(self, inv=None, check_list=[], ops=None):
ops_dict = {'new_created': u'运行中',
'start': u'运行中',
'stop': u'已停止',
'resume': u'已停止'}
if ops:
check_list.append(ops_dict[ops])
for v in check_list:
not_ready_list = [u'重启中']
if any(x in self.elem.text for x in not_ready_list):
time.sleep(1)
continue
elif v not in self.elem.text:
test_util.test_fail("Can not find %s in vm checker" % v.encode('utf-8'))
test_util.test_logger("Find %s in vm checker successful" % v.encode('utf-8'))
def volume_check(self, check_list=[], ops=None):
if ops == 'detached':
check_list.append(u'未加载')
for v in check_list:
if v not in self.elem.text:
test_util.test_fail("Can not find %s in volume checker" % v.encode('utf-8'))
test_util.test_logger("Find %s in volume checker successful" % v.encode('utf-8'))
def image_check(self, check_list=[]):
check_list.append(u'就绪')
for v in check_list:
not_ready_list = [u'下载中', u'计算中', u'解析中']
if any(x in self.elem.text for x in not_ready_list):
time.sleep(1)
continue
elif v not in self.elem.text:
test_util.test_fail("Can not find %s in image checker" % v.encode('utf-8'))
test_util.test_logger("Find %s in image checker successful" % v.encode('utf-8'))
def network_check(self, check_list=[]):
for v in check_list:
if v not in self.elem.text:
test_util.test_fail("Can not find %s in network checker" % v.encode('utf-8'))
test_util.test_logger("Find %s in network checker successful" % v.encode('utf-8'))
def host_check(self, check_list=[]):
for v in check_list:
if v not in self.elem.text:
test_util.test_fail("Can not find %s in host checker" % v.encode('utf-8'))
test_util.test_logger("Find %s in host checker successful" % v.encode('utf-8'))
def eip_check(self, check_list=[]):
for v in check_list:
if v not in self.elem.text:
test_util.test_fail("Can not find %s in eip checker" % v.encode('utf-8'))
test_util.test_logger("Find %s in eip checker successful" % v.encode('utf-8'))
| zstackio/zstack-woodpecker | integrationtest/vm/e2e_mini/test_stub.py | Python | apache-2.0 | 38,710 |
"""
System tests for `jenkinsapi.jenkins` module.
"""
import time
import logging
import pytest
from jenkinsapi.build import Build
from jenkinsapi.queue import QueueItem
from jenkinsapi_tests.test_utils.random_strings import random_string
from jenkinsapi_tests.systests.job_configs import LONG_RUNNING_JOB
from jenkinsapi_tests.systests.job_configs import SHORTISH_JOB, EMPTY_JOB
from jenkinsapi.custom_exceptions import BadParams, NotFound
log = logging.getLogger(__name__)
def test_invocation_object(jenkins):
job_name = 'Acreate_%s' % random_string()
job = jenkins.create_job(job_name, SHORTISH_JOB)
qq = job.invoke()
assert isinstance(qq, QueueItem)
# Let Jenkins catchup
qq.block_until_building()
assert qq.get_build_number() == 1
def test_get_block_until_build_running(jenkins):
job_name = 'Bcreate_%s' % random_string()
job = jenkins.create_job(job_name, LONG_RUNNING_JOB)
qq = job.invoke()
time.sleep(3)
bn = qq.block_until_building(delay=3).get_number()
assert isinstance(bn, int)
build = qq.get_build()
assert isinstance(build, Build)
assert build.is_running()
build.stop()
# if we call next line right away - Jenkins have no time to stop job
# so we wait a bit
time.sleep(1)
assert not build.is_running()
console = build.get_console()
assert isinstance(console, str)
assert 'Started by user' in console
def test_get_block_until_build_complete(jenkins):
job_name = 'Ccreate_%s' % random_string()
job = jenkins.create_job(job_name, SHORTISH_JOB)
qq = job.invoke()
qq.block_until_complete()
assert not qq.get_build().is_running()
def test_mi_and_get_last_build(jenkins):
job_name = 'Dcreate_%s' % random_string()
job = jenkins.create_job(job_name, SHORTISH_JOB)
for _ in range(3):
ii = job.invoke()
ii.block_until_complete(delay=2)
build_number = job.get_last_good_buildnumber()
assert build_number == 3
build = job.get_build(build_number)
assert isinstance(build, Build)
build = job.get_build_metadata(build_number)
assert isinstance(build, Build)
def test_mi_and_get_build_number(jenkins):
job_name = 'Ecreate_%s' % random_string()
job = jenkins.create_job(job_name, EMPTY_JOB)
for invocation in range(3):
qq = job.invoke()
qq.block_until_complete(delay=1)
build_number = qq.get_build_number()
assert build_number == invocation + 1
def test_mi_and_delete_build(jenkins):
job_name = 'Ecreate_%s' % random_string()
job = jenkins.create_job(job_name, EMPTY_JOB)
for invocation in range(3):
qq = job.invoke()
qq.block_until_complete(delay=1)
build_number = qq.get_build_number()
assert build_number == invocation + 1
# Delete build using Job.delete_build
job.get_build(1)
job.delete_build(1)
with pytest.raises(NotFound):
job.get_build(1)
# Delete build using Job as dictionary of builds
assert isinstance(job[2], Build)
del job[2]
with pytest.raises(NotFound):
job.get_build(2)
with pytest.raises(NotFound):
job.delete_build(99)
def test_give_params_on_non_parameterized_job(jenkins):
job_name = 'Ecreate_%s' % random_string()
job = jenkins.create_job(job_name, EMPTY_JOB)
with pytest.raises(BadParams):
job.invoke(build_params={'foo': 'bar', 'baz': 99})
def test_keep_build_toggle(jenkins):
job_name = 'Ecreate_%s' % random_string()
job = jenkins.create_job(job_name, EMPTY_JOB)
qq = job.invoke()
qq.block_until_complete(delay=1)
build = job.get_last_build()
assert not build.is_kept_forever()
build.toggle_keep()
assert build.is_kept_forever()
build_number = job.get_last_buildnumber()
job.toggle_keep_build(build_number)
build = job.get_last_build()
assert not build.is_kept_forever()
| salimfadhley/jenkinsapi | jenkinsapi_tests/systests/test_invocation.py | Python | mit | 3,914 |
"""
MAUS worker utilities.
"""
# This file is part of MAUS: http://micewww.pp.rl.ac.uk:8080/projects/maus
#
# MAUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAUS. If not, see <http://www.gnu.org/licenses/>.
from types import ListType
from types import StringType
from types import UnicodeType
from MapPyGroup import MapPyGroup
class WorkerUtilities: # pylint: disable=W0232
"""
MAUS worker utility methods.
"""
@classmethod
def create_transform(cls, transform):
"""
Create a transform given the name of transform
class(es). Either a single name can be given - representing a
single transform - or a list of transforms - representing a
MapPyGroup. Sub-lists are treated as nested
MapPyGroups. Example list arguments include:
@verbatim
[]
["MapCppTOFDigits", "MapCppTOFSlabHits", "MapCppTOFSpacePoint"]
["MapCppTOFDigits", ["MapCppTOFSlabHits", "MapCppTOFSpacePoint"]]
@endverbatim
Transforms must be in modules named after the transform class
e.g. for the above the transform MapCppTOFSlabHits must be
in a class MapCppTOFSlabHits.MapCppTOFSlabHits.
@param cls Class reference.
@param transform Transform name or list of names.
@return transform object or MapPyGroup object (if given a list).
@throws ValueError if transform is not a string or a list,
or contains an element which is not a list or string, or
specifies an unknown transform name.
"""
if isinstance(transform, ListType):
group = MapPyGroup()
for transform_name in transform:
group.append(cls.create_transform(transform_name))
return group
elif isinstance(transform, StringType) \
or isinstance(transform, UnicodeType):
transform_class = cls._get_transform_class(transform)
return transform_class()
else:
raise ValueError("Transform name %s is not a string" % transform)
@classmethod
def validate_transform(cls, transform):
"""
Validate whether the names of transform class(es) are valid
transforms. Either a single name can be given - representing a
single transform - or a list of transforms - representing a
MapPyGroup. Sub-lists are treated as nested
MapPyGroups. Example list arguments include:
@verbatim
[]
["MapCppTOFDigits", "MapCppTOFSlabHits", "MapCppTOFSpacePoint"]
["MapCppTOFDigits", ["MapCppTOFSlabHits", "MapCppTOFSpacePoint"]]
@endverbatim
Transforms must be in modules named after the transform class
e.g. for the above the transform MapCppTOFSlabHits must be
in a class MapCppTOFSlabHits.MapCppTOFSlabHits.
@param cls Class reference.
@param transform Transform name or list of names.
@throws ValueError if transform is not a string or a list,
or contains an element which is not a list or string, or
specifies an unknown transform name.
"""
if isinstance(transform, ListType):
for transform_name in transform:
cls.validate_transform(transform_name)
elif isinstance(transform, StringType) \
or isinstance(transform, UnicodeType):
return cls._get_transform_class(transform)
else:
raise ValueError("Transform name %s is not a string" % transform)
@classmethod
def get_worker_names(cls, worker):
"""
Given a worker class get the name of the worker. If the
worker is MapPyGroup then a list of worker names is
returned e.g.
@verbatim
["MapCppTOFDigits", "MapCppTOFSlabHits", "MapCppTOFSpacePoint"]
or
["MapCppTOFDigits", ["MapCppTOFSlabHits", "MapCppTOFSpacePoint"]]
@endverbatim
@param cls Class reference.
@param worker Worker.
@return worker name or, for MapPyGroup, list of worker names.
"""
if hasattr(worker, "get_worker_names"):
workers = worker.get_worker_names()
else:
workers = worker.__class__.__name__
return workers
@classmethod
def _get_transform_class(cls, transform):
"""
Get the transform type object (for instantiation) given string name or
raise a ValueError
"""
        # nb tempting to use MAUS here - but it doesn't work, as it creates a
# circular dependency
try:
transform_module = __import__(transform)
except ImportError:
try:
transform_module = __import__("_"+transform)
except ImportError:
raise ValueError("No such transform: %s" % transform)
try:
transform_class = getattr(transform_module, transform)
except AttributeError:
raise ValueError("No such transform: %s" % transform)
return transform_class
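# Illustrative sketch (the transform names are the same examples used in the
# docstrings above and must exist as importable modules):
#
#   group = WorkerUtilities.create_transform(
#       ["MapCppTOFDigits", ["MapCppTOFSlabHits", "MapCppTOFSpacePoint"]])
#   names = WorkerUtilities.get_worker_names(group)
#
# A list argument yields a MapPyGroup; nested lists yield nested MapPyGroups.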
class WorkerOperationException(Exception):
""" Exception raised if a MAUS worker operation returns False. """
def __init__(self, worker):
"""
Constructor. Overrides Exception.__init__.
@param self Object reference.
@param worker Name of worker that failed.
"""
Exception.__init__(self)
self._worker = worker
class WorkerBirthFailedException(WorkerOperationException):
""" Exception raised if MAUS worker birth returns False. """
def __str__(self):
"""
Return string representation. Overrides Exception.__str__.
@param self Object reference.
@return string.
"""
return "%s returned False" % self._worker
class WorkerDeathFailedException(WorkerOperationException):
""" Exception raised if MAUS worker death returns False. """
def __str__(self):
"""
Return string representation. Overrides Exception.__str__.
@param self Object reference.
@return string.
"""
return "%s returned False" % self._worker
class WorkerDeadException(WorkerOperationException):
"""
Exception raised if MAUS worker process is called but the worker
is dead.
"""
def __str__(self):
"""
Return string representation. Overrides Exception.__str__.
@param self Object reference.
@return string.
"""
return "%s process called after death" % self._worker
class WorkerProcessException(WorkerOperationException):
"""
Exception raised if MAUS worker process throws an exception.
"""
def __init__(self, worker, details = None):
"""
Constructor. Overrides WorkerOperationException.__init__.
@param self Object reference.
@param worker Name of worker that failed.
@param details Details on the exception.
"""
WorkerOperationException.__init__(self, worker)
self._details = details
def __str__(self):
"""
Return string representation. Overrides Exception.__str__.
@param self Object reference.
@return string.
"""
if (self._details == None):
detail_str = ""
else:
detail_str = ": %s" % self._details
return "%s process threw an exception%s" \
% (self._worker, detail_str)
| mice-software/maus | src/common_py/framework/workers.py | Python | gpl-3.0 | 7,931 |
import json
import datetime
import shlex
import os
import urllib
import urllib2
import urlparse
import random
import string
import logging
import pprint
import distutils.version
import sqlalchemy
from sqlalchemy.exc import (ProgrammingError, IntegrityError,
DBAPIError, DataError)
import psycopg2.extras
import ckan.lib.cli as cli
import ckan.plugins.toolkit as toolkit
log = logging.getLogger(__name__)
if not os.environ.get('DATASTORE_LOAD'):
import paste.deploy.converters as converters
ValidationError = toolkit.ValidationError
else:
log.warn("Running datastore without CKAN")
class ValidationError(Exception):
def __init__(self, error_dict):
pprint.pprint(error_dict)
_pg_types = {}
_type_names = set()
_engines = {}
_TIMEOUT = 60000 # milliseconds
# See http://www.postgresql.org/docs/9.2/static/errcodes-appendix.html
_PG_ERR_CODE = {
'unique_violation': '23505',
'query_canceled': '57014',
'undefined_object': '42704',
'syntax_error': '42601',
'permission_denied': '42501',
'duplicate_table': '42P07',
'duplicate_alias': '42712',
}
_DATE_FORMATS = ['%Y-%m-%d',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%SZ',
'%d/%m/%Y',
'%m/%d/%Y',
'%d-%m-%Y',
'%m-%d-%Y']
_INSERT = 'insert'
_UPSERT = 'upsert'
_UPDATE = 'update'
def _strip(input):
if isinstance(input, basestring) and len(input) and input[0] == input[-1]:
return input.strip().strip('"')
return input
def _pluck(field, arr):
return [x[field] for x in arr]
def _get_list(input, strip=True):
'''Transforms a string or list to a list'''
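    # e.g. _get_list('a, b') and _get_list(['a', 'b']) both give ['a', 'b'] (illustrative)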
if input is None:
return
if input == '':
return []
l = converters.aslist(input, ',', True)
if strip:
return [_strip(x) for x in l]
else:
return l
def _is_valid_field_name(name):
'''
Check that field name is valid:
* can't start with underscore
* can't contain double quote (")
* can't be empty
'''
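    # e.g. 'age' is valid; '_id', 'a"b' and '' are not (illustrative)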
return name.strip() and not name.startswith('_') and not '"' in name
def _is_valid_table_name(name):
if '%' in name:
return False
return _is_valid_field_name(name)
def _validate_int(i, field_name, non_negative=False):
try:
i = int(i)
except ValueError:
raise ValidationError({
field_name: ['{0} is not an integer'.format(i)]
})
if non_negative and i < 0:
raise ValidationError({
field_name: ['{0} is not a non-negative integer'.format(i)]
})
def _get_engine(data_dict):
'''Get either read or write engine.'''
connection_url = data_dict['connection_url']
engine = _engines.get(connection_url)
if not engine:
engine = sqlalchemy.create_engine(connection_url)
_engines[connection_url] = engine
return engine
def _cache_types(context):
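    # Populate the oid -> type-name cache from pg_type and, on the first run,
    # create the composite "nested" type (a json/text field plus an extra text
    # field) that the datastore uses for nested columns.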
if not _pg_types:
connection = context['connection']
results = connection.execute(
'SELECT oid, typname FROM pg_type;'
)
for result in results:
_pg_types[result[0]] = result[1]
_type_names.add(result[1])
if 'nested' not in _type_names:
native_json = _pg_version_is_at_least(connection, '9.2')
log.info("Create nested type. Native JSON: {0}".format(
native_json))
import pylons
data_dict = {
'connection_url': pylons.config['ckan.datastore.write_url']}
engine = _get_engine(data_dict)
with engine.begin() as connection:
connection.execute(
'CREATE TYPE "nested" AS (json {0}, extra text)'.format(
'json' if native_json else 'text'))
_pg_types.clear()
## redo cache types with json now available.
return _cache_types(context)
psycopg2.extras.register_composite('nested',
connection.connection,
True)
def _pg_version_is_at_least(connection, version):
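    # e.g. returns True for version='9.2' when "select version();" reports
    # "PostgreSQL 9.3.5 on x86_64 ..." (illustrative)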
try:
v = distutils.version.LooseVersion(version)
pg_version = connection.execute('select version();').fetchone()
pg_version_number = pg_version[0].split()[1]
pv = distutils.version.LooseVersion(pg_version_number)
return v <= pv
except ValueError:
return False
def _is_valid_pg_type(context, type_name):
if type_name in _type_names:
return True
else:
connection = context['connection']
try:
connection.execute('SELECT %s::regtype', type_name)
except ProgrammingError, e:
if e.orig.pgcode in [_PG_ERR_CODE['undefined_object'],
_PG_ERR_CODE['syntax_error']]:
return False
raise
else:
return True
def _get_type(context, oid):
_cache_types(context)
return _pg_types[oid]
def _rename_json_field(data_dict):
'''Rename json type to a corresponding type for the datastore since
pre 9.2 postgres versions do not support native json'''
return _rename_field(data_dict, 'json', 'nested')
def _unrename_json_field(data_dict):
return _rename_field(data_dict, 'nested', 'json')
def _rename_field(data_dict, term, replace):
fields = data_dict.get('fields', [])
for i, field in enumerate(fields):
if 'type' in field and field['type'] == term:
data_dict['fields'][i]['type'] = replace
return data_dict
def _guess_type(field):
'''Simple guess type of field, only allowed are
integer, numeric and text'''
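    # Illustrative mapping: 42 -> 'int', "42" -> 'integer', "3.14" -> 'numeric',
    # "2012-01-01" -> 'timestamp', {"a": 1} -> 'nested', "hello" -> 'text'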
data_types = set([int, float])
if isinstance(field, (dict, list)):
return 'nested'
if isinstance(field, int):
return 'int'
if isinstance(field, float):
return 'float'
for data_type in list(data_types):
try:
data_type(field)
except (TypeError, ValueError):
data_types.discard(data_type)
if not data_types:
break
if int in data_types:
return 'integer'
elif float in data_types:
return 'numeric'
##try iso dates
for format in _DATE_FORMATS:
try:
datetime.datetime.strptime(field, format)
return 'timestamp'
except (ValueError, TypeError):
continue
return 'text'
def _get_fields(context, data_dict):
fields = []
all_fields = context['connection'].execute(
u'SELECT * FROM "{0}" LIMIT 1'.format(data_dict['resource_id'])
)
for field in all_fields.cursor.description:
if not field[0].startswith('_'):
fields.append({
'id': field[0].decode('utf-8'),
'type': _get_type(context, field[1])
})
return fields
def json_get_values(obj, current_list=None):
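    # Collect every string nested anywhere inside obj, e.g.
    # json_get_values({'a': ['x', {'b': 'y'}]}) -> ['x', 'y'] (illustrative)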
if current_list is None:
current_list = []
if isinstance(obj, basestring):
current_list.append(obj)
if isinstance(obj, list):
for item in obj:
json_get_values(item, current_list)
if isinstance(obj, dict):
for item in obj.values():
json_get_values(item, current_list)
return current_list
def check_fields(context, fields):
'''Check if field types are valid.'''
for field in fields:
if field.get('type') and not _is_valid_pg_type(context, field['type']):
raise ValidationError({
'fields': ['"{0}" is not a valid field type'.format(
field['type'])]
})
elif not _is_valid_field_name(field['id']):
raise ValidationError({
'fields': ['"{0}" is not a valid field name'.format(
field['id'])]
})
def convert(data, type_name):
if data is None:
return None
if type_name == 'nested':
return json.loads(data[0])
# array type
if type_name.startswith('_'):
sub_type = type_name[1:]
return [convert(item, sub_type) for item in data]
if type_name == 'tsvector':
return unicode(data, 'utf-8')
if isinstance(data, datetime.datetime):
return data.isoformat()
if isinstance(data, (int, float)):
return data
return unicode(data)
def create_table(context, data_dict):
'''Create table from combination of fields and first row of data.'''
datastore_fields = [
{'id': '_id', 'type': 'serial primary key'},
{'id': '_full_text', 'type': 'tsvector'},
]
# check first row of data for additional fields
extra_fields = []
supplied_fields = data_dict.get('fields', [])
check_fields(context, supplied_fields)
field_ids = _pluck('id', supplied_fields)
records = data_dict.get('records')
    # if the type of a field is not given, try to guess it or throw an error
for field in supplied_fields:
if 'type' not in field:
if not records or field['id'] not in records[0]:
raise ValidationError({
'fields': ['"{0}" type not guessable'.format(field['id'])]
})
field['type'] = _guess_type(records[0][field['id']])
if records:
# check record for sanity
if not isinstance(records[0], dict):
raise ValidationError({
'records': ['The first row is not a json object']
})
supplied_field_ids = records[0].keys()
for field_id in supplied_field_ids:
if not field_id in field_ids:
extra_fields.append({
'id': field_id,
'type': _guess_type(records[0][field_id])
})
fields = datastore_fields + supplied_fields + extra_fields
sql_fields = u", ".join([u'"{0}" {1}'.format(
f['id'], f['type']) for f in fields])
sql_string = u'CREATE TABLE "{0}" ({1});'.format(
data_dict['resource_id'],
sql_fields
)
context['connection'].execute(sql_string.replace('%', '%%'))
def _get_aliases(context, data_dict):
'''Get a list of aliases for a resource.'''
res_id = data_dict['resource_id']
alias_sql = sqlalchemy.text(
u'SELECT name FROM "_table_metadata" WHERE alias_of = :id')
results = context['connection'].execute(alias_sql, id=res_id).fetchall()
return [x[0] for x in results]
def _get_resources(context, alias):
'''Get a list of resources for an alias. There could be more than one alias
in a resource_dict.'''
alias_sql = sqlalchemy.text(
u'''SELECT alias_of FROM "_table_metadata"
WHERE name = :alias AND alias_of IS NOT NULL''')
results = context['connection'].execute(alias_sql, alias=alias).fetchall()
return [x[0] for x in results]
def create_alias(context, data_dict):
aliases = _get_list(data_dict.get('aliases'))
if aliases is not None:
# delete previous aliases
previous_aliases = _get_aliases(context, data_dict)
for alias in previous_aliases:
sql_alias_drop_string = u'DROP VIEW "{0}"'.format(alias)
context['connection'].execute(sql_alias_drop_string)
try:
for alias in aliases:
sql_alias_string = u'''CREATE VIEW "{alias}"
AS SELECT * FROM "{main}"'''.format(
alias=alias,
main=data_dict['resource_id']
)
res_ids = _get_resources(context, alias)
if res_ids:
raise ValidationError({
'alias': [(u'The alias "{0}" already exists.').format(
alias)]
})
context['connection'].execute(sql_alias_string)
except DBAPIError, e:
if e.orig.pgcode in [_PG_ERR_CODE['duplicate_table'],
_PG_ERR_CODE['duplicate_alias']]:
raise ValidationError({
'alias': ['"{0}" already exists'.format(alias)]
})
def create_indexes(context, data_dict):
indexes = _get_list(data_dict.get('indexes'))
# primary key is not a real primary key
# it's just a unique key
primary_key = _get_list(data_dict.get('primary_key'))
# index and primary key could be [],
# which means that indexes should be deleted
if indexes is None and primary_key is None:
return
sql_index_tmpl = u'CREATE {unique} INDEX {name} ON "{res_id}"'
sql_index_string_method = sql_index_tmpl + u' USING {method}({fields})'
sql_index_string = sql_index_tmpl + u' ({fields})'
sql_index_strings = []
fields = _get_fields(context, data_dict)
field_ids = _pluck('id', fields)
json_fields = [x['id'] for x in fields if x['type'] == 'nested']
def generate_index_name():
        # pg 9.0+ does not require an index name
if _pg_version_is_at_least(context['connection'], '9.0'):
return ''
else:
src = string.ascii_letters + string.digits
random_string = ''.join([random.choice(src) for n in xrange(10)])
return 'idx_' + random_string
if indexes is not None:
_drop_indexes(context, data_dict, False)
# create index for faster full text search (indexes: gin or gist)
sql_index_strings.append(sql_index_string_method.format(
res_id=data_dict['resource_id'],
unique='',
name=generate_index_name(),
method='gist', fields='_full_text'))
else:
indexes = []
if primary_key is not None:
_drop_indexes(context, data_dict, True)
indexes.append(primary_key)
for index in indexes:
if not index:
continue
index_fields = _get_list(index)
for field in index_fields:
if field not in field_ids:
raise ValidationError({
'index': [
('The field "{0}" is not a valid column name.').format(
index)]
})
fields_string = u', '.join(
['(("{0}").json::text)'.format(field)
if field in json_fields else
'"%s"' % field
for field in index_fields])
sql_index_strings.append(sql_index_string.format(
res_id=data_dict['resource_id'],
unique='unique' if index == primary_key else '',
name=generate_index_name(),
fields=fields_string))
sql_index_strings = map(lambda x: x.replace('%', '%%'), sql_index_strings)
map(context['connection'].execute, sql_index_strings)
def _drop_indexes(context, data_dict, unique=False):
sql_drop_index = u'DROP INDEX "{0}" CASCADE'
sql_get_index_string = u"""
SELECT
i.relname AS index_name
FROM
pg_class t,
pg_class i,
pg_index idx
WHERE
t.oid = idx.indrelid
AND i.oid = idx.indexrelid
AND t.relkind = 'r'
AND idx.indisunique = {unique}
AND idx.indisprimary = false
AND t.relname = %s
""".format(unique='true' if unique else 'false')
indexes_to_drop = context['connection'].execute(
sql_get_index_string, data_dict['resource_id']).fetchall()
for index in indexes_to_drop:
context['connection'].execute(
sql_drop_index.format(index[0]).replace('%', '%%'))
def alter_table(context, data_dict):
'''alter table from combination of fields and first row of data
return: all fields of the resource table'''
supplied_fields = data_dict.get('fields', [])
current_fields = _get_fields(context, data_dict)
if not supplied_fields:
supplied_fields = current_fields
check_fields(context, supplied_fields)
field_ids = _pluck('id', supplied_fields)
records = data_dict.get('records')
new_fields = []
for num, field in enumerate(supplied_fields):
        # check to see if the field definition is the same as, or an
        # extension of, the current fields
if num < len(current_fields):
if field['id'] != current_fields[num]['id']:
raise ValidationError({
'fields': [('Supplied field "{0}" not '
'present or in wrong order').format(
field['id'])]
})
## no need to check type as field already defined.
continue
if 'type' not in field:
if not records or field['id'] not in records[0]:
raise ValidationError({
'fields': ['"{0}" type not guessable'.format(field['id'])]
})
field['type'] = _guess_type(records[0][field['id']])
new_fields.append(field)
if records:
# check record for sanity as they have not been
# checked during validation
if not isinstance(records, list):
raise ValidationError({
'records': ['Records has to be a list of dicts']
})
if not isinstance(records[0], dict):
raise ValidationError({
'records': ['The first row is not a json object']
})
supplied_field_ids = records[0].keys()
for field_id in supplied_field_ids:
if not field_id in field_ids:
new_fields.append({
'id': field_id,
'type': _guess_type(records[0][field_id])
})
for field in new_fields:
sql = 'ALTER TABLE "{0}" ADD "{1}" {2}'.format(
data_dict['resource_id'],
field['id'],
field['type'])
context['connection'].execute(sql.replace('%', '%%'))
def insert_data(context, data_dict):
data_dict['method'] = _INSERT
return upsert_data(context, data_dict)
def upsert_data(context, data_dict):
'''insert all data from records'''
if not data_dict.get('records'):
return
method = data_dict.get('method', _UPSERT)
fields = _get_fields(context, data_dict)
field_names = _pluck('id', fields)
records = data_dict['records']
sql_columns = ", ".join(['"%s"' % name.replace(
'%', '%%') for name in field_names] + ['"_full_text"'])
if method == _INSERT:
rows = []
for num, record in enumerate(records):
_validate_record(record, num, field_names)
row = []
for field in fields:
value = record.get(field['id'])
if value and field['type'].lower() == 'nested':
## a tuple with an empty second value
value = (json.dumps(value), '')
row.append(value)
row.append(_to_full_text(fields, record))
rows.append(row)
sql_string = u'''INSERT INTO "{res_id}" ({columns})
VALUES ({values}, to_tsvector(%s));'''.format(
res_id=data_dict['resource_id'],
columns=sql_columns,
values=', '.join(['%s' for field in field_names])
)
context['connection'].execute(sql_string, rows)
elif method in [_UPDATE, _UPSERT]:
unique_keys = _get_unique_key(context, data_dict)
if len(unique_keys) < 1:
raise ValidationError({
'table': [u'table does not have a unique key defined']
})
for num, record in enumerate(records):
# all key columns have to be defined
missing_fields = [field for field in unique_keys
if field not in record]
if missing_fields:
raise ValidationError({
'key': [u'''fields "{fields}" are missing
but needed as key'''.format(
fields=', '.join(missing_fields))]
})
for field in fields:
value = record.get(field['id'])
if value and field['type'].lower() == 'nested':
## a tuple with an empty second value
record[field['id']] = (json.dumps(value), '')
            non_existing_field_names = [field for field in record
                                        if field not in field_names]
            if non_existing_field_names:
                raise ValidationError({
                    'fields': [u'fields "{0}" do not exist'.format(
                        ', '.join(non_existing_field_names))]
})
unique_values = [record[key] for key in unique_keys]
used_fields = [field for field in fields
if field['id'] in record]
used_field_names = _pluck('id', used_fields)
used_values = [record[field] for field in used_field_names]
full_text = _to_full_text(fields, record)
if method == _UPDATE:
sql_string = u'''
UPDATE "{res_id}"
SET ({columns}, "_full_text") = ({values}, to_tsvector(%s))
WHERE ({primary_key}) = ({primary_value});
'''.format(
res_id=data_dict['resource_id'],
columns=u', '.join(
[u'"{0}"'.format(field)
for field in used_field_names]),
values=u', '.join(
['%s' for _ in used_field_names]),
primary_key=u','.join(
[u'"{0}"'.format(part) for part in unique_keys]),
primary_value=u','.join(["%s"] * len(unique_keys))
)
results = context['connection'].execute(
sql_string, used_values + [full_text] + unique_values)
# validate that exactly one row has been updated
if results.rowcount != 1:
raise ValidationError({
'key': [u'key "{0}" not found'.format(unique_values)]
})
elif method == _UPSERT:
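                # Classic two-statement upsert: UPDATE first, then INSERT only
                # when no existing row matches the unique key (predates
                # PostgreSQL's ON CONFLICT support).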
sql_string = u'''
UPDATE "{res_id}"
SET ({columns}, "_full_text") = ({values}, to_tsvector(%s))
WHERE ({primary_key}) = ({primary_value});
INSERT INTO "{res_id}" ({columns}, "_full_text")
SELECT {values}, to_tsvector(%s)
WHERE NOT EXISTS (SELECT 1 FROM "{res_id}"
WHERE ({primary_key}) = ({primary_value}));
'''.format(
res_id=data_dict['resource_id'],
columns=u', '.join([u'"{0}"'.format(field)
for field in used_field_names]),
values=u', '.join(['%s::nested'
if field['type'] == 'nested' else '%s'
for field in used_fields]),
primary_key=u','.join([u'"{0}"'.format(part)
for part in unique_keys]),
primary_value=u','.join(["%s"] * len(unique_keys))
)
context['connection'].execute(
sql_string,
(used_values + [full_text] + unique_values) * 2)
def _get_unique_key(context, data_dict):
sql_get_unique_key = '''
SELECT
a.attname AS column_names
FROM
pg_class t,
pg_index idx,
pg_attribute a
WHERE
t.oid = idx.indrelid
AND a.attrelid = t.oid
AND a.attnum = ANY(idx.indkey)
AND t.relkind = 'r'
AND idx.indisunique = true
AND idx.indisprimary = false
AND t.relname = %s
'''
key_parts = context['connection'].execute(sql_get_unique_key,
data_dict['resource_id'])
return [x[0] for x in key_parts]
def _validate_record(record, num, field_names):
# check record for sanity
if not isinstance(record, dict):
raise ValidationError({
'records': [u'row "{0}" is not a json object'.format(num)]
})
## check for extra fields in data
extra_keys = set(record.keys()) - set(field_names)
if extra_keys:
raise ValidationError({
'records': [u'row "{0}" has extra keys "{1}"'.format(
num + 1,
', '.join(list(extra_keys))
)]
})
def _to_full_text(fields, record):
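    # Build the value for the "_full_text" tsvector column by concatenating all
    # 'text' values and every string found inside 'nested' values of the record.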
full_text = []
for field in fields:
value = record.get(field['id'])
if field['type'].lower() == 'nested' and value:
full_text.extend(json_get_values(value))
elif field['type'].lower() == 'text' and value:
full_text.append(value)
return ' '.join(full_text)
def _where(field_ids, data_dict):
'''Return a SQL WHERE clause from data_dict filters and q'''
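    # e.g. filters={'author': 'bob'} plus a 'q' term yields
    # ('WHERE "author" = %s AND _full_text @@ query', ['bob']) (illustrative)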
filters = data_dict.get('filters', {})
if not isinstance(filters, dict):
raise ValidationError({
'filters': ['Not a json object']}
)
where_clauses = []
values = []
for field, value in filters.iteritems():
if field not in field_ids:
raise ValidationError({
'filters': ['field "{0}" not in table'.format(field)]}
)
where_clauses.append(u'"{0}" = %s'.format(field))
values.append(value)
# add full-text search where clause
if data_dict.get('q'):
where_clauses.append(u'_full_text @@ query')
where_clause = u' AND '.join(where_clauses)
if where_clause:
where_clause = u'WHERE ' + where_clause
return where_clause, values
def _textsearch_query(data_dict):
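    # Returns a (from-clause fragment, rank column) pair used by search_data, e.g.
    # (", plainto_tsquery('english', 'cats') query",
    #  ', ts_rank(_full_text, query, 32) AS rank') (illustrative)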
q = data_dict.get('q')
lang = data_dict.get(u'language', u'english')
if q:
if data_dict.get('plain', True):
statement = u", plainto_tsquery('{lang}', '{query}') query"
else:
statement = u", to_tsquery('{lang}', '{query}') query"
rank_column = u', ts_rank(_full_text, query, 32) AS rank'
return statement.format(lang=lang, query=q), rank_column
return '', ''
def _sort(context, data_dict, field_ids):
sort = data_dict.get('sort')
if not sort:
if data_dict.get('q'):
return u'ORDER BY rank'
else:
return u''
clauses = _get_list(sort, False)
clause_parsed = []
for clause in clauses:
clause = clause.encode('utf-8')
if clause.endswith(' asc'):
field, sort = clause[:-4], 'asc'
elif clause.endswith(' desc'):
field, sort = clause[:-5], 'desc'
else:
field, sort = clause, 'asc'
field, sort = unicode(field, 'utf-8'), unicode(sort, 'utf-8')
if field not in field_ids:
raise ValidationError({
'sort': [u'field "{0}" not in table'.format(
field)]
})
if sort.lower() not in ('asc', 'desc'):
raise ValidationError({
'sort': ['sorting can only be asc or desc']
})
clause_parsed.append(u'"{0}" {1}'.format(
field, sort)
)
if clause_parsed:
return "order by " + ", ".join(clause_parsed)
def _insert_links(data_dict, limit, offset):
'''Adds link to the next/prev part (same limit, offset=offset+limit)
and the resource page.'''
data_dict['_links'] = {}
# get the url from the request
try:
urlstring = toolkit.request.environ['CKAN_CURRENT_URL']
except TypeError:
return # no links required for local actions
# change the offset in the url
parsed = list(urlparse.urlparse(urlstring))
query = urllib2.unquote(parsed[4])
arguments = dict(urlparse.parse_qsl(query))
arguments_start = dict(arguments)
arguments_prev = dict(arguments)
arguments_next = dict(arguments)
if 'offset' in arguments_start:
arguments_start.pop('offset')
arguments_next['offset'] = int(offset) + int(limit)
arguments_prev['offset'] = int(offset) - int(limit)
parsed_start = parsed[:]
parsed_prev = parsed[:]
parsed_next = parsed[:]
parsed_start[4] = urllib.urlencode(arguments_start)
parsed_next[4] = urllib.urlencode(arguments_next)
parsed_prev[4] = urllib.urlencode(arguments_prev)
# add the links to the data dict
data_dict['_links']['start'] = urlparse.urlunparse(parsed_start)
data_dict['_links']['next'] = urlparse.urlunparse(parsed_next)
if int(offset) - int(limit) > 0:
data_dict['_links']['prev'] = urlparse.urlunparse(parsed_prev)
def delete_data(context, data_dict):
fields = _get_fields(context, data_dict)
field_ids = set([field['id'] for field in fields])
where_clause, where_values = _where(field_ids, data_dict)
context['connection'].execute(
u'DELETE FROM "{0}" {1}'.format(
data_dict['resource_id'],
where_clause
),
where_values
)
def search_data(context, data_dict):
all_fields = _get_fields(context, data_dict)
all_field_ids = _pluck('id', all_fields)
all_field_ids.insert(0, '_id')
fields = data_dict.get('fields')
if fields:
field_ids = _get_list(fields)
for field in field_ids:
if not field in all_field_ids:
raise ValidationError({
'fields': [u'field "{0}" not in table'.format(field)]}
)
else:
field_ids = all_field_ids
select_columns = ', '.join([u'"{0}"'.format(field_id)
for field_id in field_ids])
ts_query, rank_column = _textsearch_query(data_dict)
where_clause, where_values = _where(all_field_ids, data_dict)
limit = data_dict.get('limit', 100)
offset = data_dict.get('offset', 0)
_validate_int(limit, 'limit', non_negative=True)
_validate_int(offset, 'offset', non_negative=True)
if 'limit' in data_dict:
data_dict['limit'] = int(limit)
if 'offset' in data_dict:
data_dict['offset'] = int(offset)
sort = _sort(context, data_dict, field_ids)
sql_string = u'''SELECT {select}, count(*) over() AS "_full_count" {rank}
FROM "{resource}" {ts_query}
{where} {sort} LIMIT {limit} OFFSET {offset}'''.format(
select=select_columns,
rank=rank_column,
resource=data_dict['resource_id'],
ts_query=ts_query,
where='{where}',
sort=sort,
limit=limit,
offset=offset)
sql_string = sql_string.replace('%', '%%')
results = context['connection'].execute(
sql_string.format(where=where_clause), [where_values])
_insert_links(data_dict, limit, offset)
return format_results(context, results, data_dict)
def format_results(context, results, data_dict):
result_fields = []
for field in results.cursor.description:
result_fields.append({
'id': field[0].decode('utf-8'),
'type': _get_type(context, field[1])
})
if len(result_fields) and result_fields[-1]['id'] == '_full_count':
result_fields.pop() # remove _full_count
records = []
for row in results:
converted_row = {}
if '_full_count' in row:
data_dict['total'] = row['_full_count']
for field in result_fields:
converted_row[field['id']] = convert(row[field['id']],
field['type'])
records.append(converted_row)
data_dict['records'] = records
data_dict['fields'] = result_fields
return _unrename_json_field(data_dict)
def _is_single_statement(sql):
return not ';' in sql.strip(';')
def create(context, data_dict):
'''
The first row will be used to guess types not in the fields and the
guessed types will be added to the headers permanently.
Consecutive rows have to conform to the field definitions.
rows can be empty so that you can just set the fields.
fields are optional but needed if you want to do type hinting or
add extra information for certain columns or to explicitly
define ordering.
eg: [{"id": "dob", "type": "timestamp"},
{"id": "name", "type": "text"}]
    A header item's values cannot be changed after it has been defined,
    nor can their ordering be changed. They can be extended though.
Any error results in total failure! For now pass back the actual error.
Should be transactional.
'''
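    # A minimal data_dict sketch (illustrative):
    # {'connection_url': <write url>, 'resource_id': <resource id>,
    #  'fields': [{'id': 'name', 'type': 'text'}], 'records': [{'name': 'Anna'}]}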
engine = _get_engine(data_dict)
context['connection'] = engine.connect()
timeout = context.get('query_timeout', _TIMEOUT)
_cache_types(context)
_rename_json_field(data_dict)
trans = context['connection'].begin()
try:
        # check if table already exists
context['connection'].execute(
u'SET LOCAL statement_timeout TO {0}'.format(timeout))
result = context['connection'].execute(
u'SELECT * FROM pg_tables WHERE tablename = %s',
data_dict['resource_id']
).fetchone()
if not result:
create_table(context, data_dict)
else:
alter_table(context, data_dict)
insert_data(context, data_dict)
create_indexes(context, data_dict)
create_alias(context, data_dict)
if data_dict.get('private'):
_change_privilege(context, data_dict, 'REVOKE')
trans.commit()
return _unrename_json_field(data_dict)
except IntegrityError, e:
if e.orig.pgcode == _PG_ERR_CODE['unique_violation']:
raise ValidationError({
'constraints': ['Cannot insert records or create index because'
' of uniqueness constraint'],
'info': {
'orig': str(e.orig),
'pgcode': e.orig.pgcode
}
})
raise
except DataError, e:
raise ValidationError({
'data': e.message,
'info': {
'orig': [str(e.orig)]
}})
except DBAPIError, e:
if e.orig.pgcode == _PG_ERR_CODE['query_canceled']:
raise ValidationError({
'query': ['Query took too long']
})
raise
except Exception, e:
trans.rollback()
raise
finally:
context['connection'].close()
def upsert(context, data_dict):
'''
    This method combines upsert, insert and update on the datastore. The method
    that will be used is defined in the 'method' variable.
Any error results in total failure! For now pass back the actual error.
Should be transactional.
'''
engine = _get_engine(data_dict)
context['connection'] = engine.connect()
timeout = context.get('query_timeout', _TIMEOUT)
trans = context['connection'].begin()
try:
        # check if table already exists
context['connection'].execute(
u'SET LOCAL statement_timeout TO {0}'.format(timeout))
upsert_data(context, data_dict)
trans.commit()
return _unrename_json_field(data_dict)
except IntegrityError, e:
if e.orig.pgcode == _PG_ERR_CODE['unique_violation']:
raise ValidationError({
'constraints': ['Cannot insert records or create index because'
' of uniqueness constraint'],
'info': {
'orig': str(e.orig),
'pgcode': e.orig.pgcode
}
})
raise
except DataError, e:
raise ValidationError({
'data': e.message,
'info': {
'orig': [str(e.orig)]
}})
except DBAPIError, e:
if e.orig.pgcode == _PG_ERR_CODE['query_canceled']:
raise ValidationError({
'query': ['Query took too long']
})
raise
except Exception, e:
trans.rollback()
raise
finally:
context['connection'].close()
def delete(context, data_dict):
engine = _get_engine(data_dict)
context['connection'] = engine.connect()
_cache_types(context)
trans = context['connection'].begin()
try:
# check if table exists
if not 'filters' in data_dict:
context['connection'].execute(
u'DROP TABLE "{0}" CASCADE'.format(data_dict['resource_id'])
)
else:
delete_data(context, data_dict)
trans.commit()
return _unrename_json_field(data_dict)
except Exception:
trans.rollback()
raise
finally:
context['connection'].close()
def search(context, data_dict):
engine = _get_engine(data_dict)
context['connection'] = engine.connect()
timeout = context.get('query_timeout', _TIMEOUT)
_cache_types(context)
try:
context['connection'].execute(
u'SET LOCAL statement_timeout TO {0}'.format(timeout))
return search_data(context, data_dict)
except DBAPIError, e:
if e.orig.pgcode == _PG_ERR_CODE['query_canceled']:
raise ValidationError({
'query': ['Search took too long']
})
raise ValidationError({
'query': ['Invalid query'],
'info': {
'statement': [e.statement],
'params': [e.params],
'orig': [str(e.orig)]
}
})
finally:
context['connection'].close()
def search_sql(context, data_dict):
engine = _get_engine(data_dict)
context['connection'] = engine.connect()
timeout = context.get('query_timeout', _TIMEOUT)
_cache_types(context)
try:
context['connection'].execute(
u'SET LOCAL statement_timeout TO {0}'.format(timeout))
results = context['connection'].execute(
data_dict['sql'].replace('%', '%%')
)
return format_results(context, results, data_dict)
except ProgrammingError, e:
if e.orig.pgcode == _PG_ERR_CODE['permission_denied']:
raise toolkit.NotAuthorized({
'permissions': ['Not authorized to read resource.']
})
raise ValidationError({
'query': [str(e)],
'info': {
'statement': [e.statement],
'params': [e.params],
'orig': [str(e.orig)]
}
})
except DBAPIError, e:
if e.orig.pgcode == _PG_ERR_CODE['query_canceled']:
raise ValidationError({
'query': ['Query took too long']
})
raise
finally:
context['connection'].close()
def _get_read_only_user(data_dict):
parsed = cli.parse_db_config('ckan.datastore.read_url')
return parsed['db_user']
def _change_privilege(context, data_dict, what):
''' We need a transaction for this code to work '''
read_only_user = _get_read_only_user(data_dict)
if what == 'REVOKE':
sql = u'REVOKE SELECT ON TABLE "{0}" FROM "{1}"'.format(
data_dict['resource_id'],
read_only_user)
elif what == 'GRANT':
sql = u'GRANT SELECT ON TABLE "{0}" TO "{1}"'.format(
data_dict['resource_id'],
read_only_user)
else:
raise ValidationError({
'privileges': 'Can only GRANT or REVOKE but not {0}'.format(what)})
try:
context['connection'].execute(sql)
except ProgrammingError, e:
log.critical("Error making resource private. {0}".format(e.message))
raise ValidationError({
'privileges': [u'cannot make "{0}" private'.format(
data_dict['resource_id'])],
'info': {
'orig': str(e.orig),
'pgcode': e.orig.pgcode
}
})
def make_private(context, data_dict):
log.info('Making resource {0} private'.format(
data_dict['resource_id']))
engine = _get_engine(data_dict)
context['connection'] = engine.connect()
trans = context['connection'].begin()
try:
_change_privilege(context, data_dict, 'REVOKE')
trans.commit()
finally:
context['connection'].close()
def make_public(context, data_dict):
log.info('Making resource {0} public'.format(
data_dict['resource_id']))
engine = _get_engine(data_dict)
context['connection'] = engine.connect()
trans = context['connection'].begin()
try:
_change_privilege(context, data_dict, 'GRANT')
trans.commit()
finally:
context['connection'].close()
| sciamlab/ckanext-lait | custom/ckan/ckanext/datastore/db.py | Python | apache-2.0 | 40,892 |
def print_rangoli(size):
    # your code goes here
    pass  # placeholder so the empty template still parses
if __name__ == '__main__':
n = int(input())
print_rangoli(n)
| jerodg/hackerrank-python | python/02.Strings/10.AlphabetRangoli/template.py | Python | mit | 118 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for the dns tool."""
import functools
import json
import sys
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.third_party.apitools.base.py import exceptions as apitools_exceptions
def GetError(error, verbose=False):
"""Returns a ready-to-print string representation from the http response.
Args:
error: A string representing the raw json of the Http error response.
verbose: Whether or not to print verbose messages [default false]
Returns:
A ready-to-print string representation of the error.
"""
data = json.loads(error.content)
reasons = ','.join([x['reason'] for x in data['error']['errors']])
status = data['error']['code']
message = data['error']['message']
code = error.resp.reason
if verbose:
PrettyPrint(data)
return ('ResponseError: status=%s, code=%s, reason(s)=%s\nmessage=%s' %
(str(status), code, reasons, message))
def GetErrorMessage(error):
error = json.loads(error.content).get('error', {})
return '\n{0} (code: {1})'.format(error.get('message', ''), error.get('code',
''))
def HandleHttpError(func):
"""Decorator that catches HttpError and raises corresponding HttpException."""
@functools.wraps(func)
def CatchHTTPErrorRaiseHTTPException(*args, **kwargs):
try:
return func(*args, **kwargs)
except apitools_exceptions.HttpError as error:
msg = GetErrorMessage(error)
unused_type, unused_value, traceback = sys.exc_info()
raise exceptions.HttpException, msg, traceback
return CatchHTTPErrorRaiseHTTPException
def PrettyPrintString(value):
return json.dumps(value, sort_keys=True, indent=4, separators=(',', ': '))
def PrettyPrint(value):
print PrettyPrintString(value)
def AppendTrailingDot(name):
return name if not name or name.endswith('.') else name + '.'
ZONE_FLAG = base.Argument(
'--zone',
'-z',
completion_resource='dns.managedZones',
help='Name of the managed-zone whose record-sets you want to manage.',
required=True)
| flgiordano/netcash | +/google-cloud-sdk/lib/googlecloudsdk/api_lib/dns/util.py | Python | bsd-3-clause | 2,739 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-12 11:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0155_auto_20190205_1344'),
]
operations = [
migrations.AlterModelOptions(
name='relationshipproof',
options={'verbose_name': '\u041f\u043e\u0441\u0438\u043b\u0430\u043d\u043d\u044f \u0430\u0431\u043e \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442', 'verbose_name_plural': '\u041f\u043e\u0441\u0438\u043b\u0430\u043d\u043d\u044f \u0430\u0431\u043e \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0438'},
),
migrations.AddField(
model_name='feedbackmessage',
name='read_and_agreed',
field=models.BooleanField(default=False, verbose_name='\u041a\u043e\u0440\u0438\u0441\u0442\u0443\u0432\u0430\u0447 \u043f\u0456\u0434\u0442\u0432\u0435\u0440\u0434\u0438\u0432 \u0449\u043e \u043f\u0440\u043e\u0447\u0438\u0442\u0430\u0432 \u0447\u0430\u0441\u0442\u043e \u0437\u0430\u0434\u0430\u0432\u0430\u0454\u043c\u0456 \u043f\u0438\u0442\u0430\u043d\u043d\u044f'),
),
migrations.AlterField(
model_name='relationshipproof',
name='proof',
field=models.TextField(blank=True, verbose_name='\u0430\u0431\u043e \u043f\u043e\u0441\u0438\u043b\u0430\u043d\u043d\u044f'),
),
migrations.AlterField(
model_name='relationshipproof',
name='proof_document',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Document', verbose_name='\u0424\u0430\u0439\u043b \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430'),
),
migrations.AlterField(
model_name='relationshipproof',
name='proof_title',
field=models.TextField(blank=True, help_text='\u041d\u0430\u043f\u0440\u0438\u043a\u043b\u0430\u0434: \u0441\u043a\u043b\u0430\u0434 \u0412\u0420 7-\u0433\u043e \u0441\u043a\u043b\u0438\u043a\u0430\u043d\u043d\u044f', verbose_name='\u041d\u0430\u0437\u0432\u0430 \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430 \u0430\u0431\u043e \u043f\u043e\u0441\u0438\u043b\u0430\u043d\u043d\u044f'),
),
migrations.AlterField(
model_name='relationshipproof',
name='proof_title_en',
field=models.TextField(blank=True, help_text='\u041d\u0430\u043f\u0440\u0438\u043a\u043b\u0430\u0434: \u0441\u043a\u043b\u0430\u0434 \u0412\u0420 7-\u0433\u043e \u0441\u043a\u043b\u0438\u043a\u0430\u043d\u043d\u044f', null=True, verbose_name='\u041d\u0430\u0437\u0432\u0430 \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430 \u0430\u0431\u043e \u043f\u043e\u0441\u0438\u043b\u0430\u043d\u043d\u044f'),
),
migrations.AlterField(
model_name='relationshipproof',
name='proof_title_uk',
field=models.TextField(blank=True, help_text='\u041d\u0430\u043f\u0440\u0438\u043a\u043b\u0430\u0434: \u0441\u043a\u043b\u0430\u0434 \u0412\u0420 7-\u0433\u043e \u0441\u043a\u043b\u0438\u043a\u0430\u043d\u043d\u044f', null=True, verbose_name='\u041d\u0430\u0437\u0432\u0430 \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430 \u0430\u0431\u043e \u043f\u043e\u0441\u0438\u043b\u0430\u043d\u043d\u044f'),
),
]
| dchaplinsky/pep.org.ua | pepdb/core/migrations/0156_auto_20190512_1445.py | Python | mit | 3,445 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for netload
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
from core import logger
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[netload.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
return video_urls
# Find videos for this server in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://netload.in/dateiroqHV0QNJg/Salmon.Fishing.in.the.Yemen.2012.720p.UNSOLOCLIC.INFO.mkv.htm
patronvideos = '(netload.in/[a-zA-Z0-9]+/.*?.htm)'
logger.info("[netload.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data+'"')
for match in matches:
titulo = "[netload]"
url = "http://"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'netload' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
# http://netload.in/datei2OuYAjcVGq.htm
patronvideos = '(netload.in/[a-zA-Z0-9]+.htm)'
logger.info("[netload.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data+'"')
for match in matches:
titulo = "[netload]"
url = "http://"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'netload' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
| ChopChopKodi/pelisalacarta | python/main-classic/servers/netload.py | Python | gpl-3.0 | 1,786 |
"""Test using colorlog with logging.config"""
import logging
import logging.config
import os.path
def path(filename):
"""Return an absolute path to a file in the current directory."""
return os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
def test_build_from_file(test_logger):
logging.config.fileConfig(path("test_config.ini"))
test_logger(logging.getLogger(), lambda l: ":test_config.ini" in l)
def test_build_from_dictionary(test_logger):
logging.config.dictConfig(
{
"version": 1,
"formatters": {
"colored": {
"()": "colorlog.ColoredFormatter",
"format": "%(log_color)s%(levelname)s:%(name)s:%(message)s:dict",
}
},
"handlers": {
"stream": {
"class": "logging.StreamHandler",
"formatter": "colored",
"level": "DEBUG",
},
},
"loggers": {
"": {
"handlers": ["stream"],
"level": "DEBUG",
},
},
}
)
test_logger(logging.getLogger(), lambda l: ":dict" in l)
| borntyping/python-colorlog | colorlog/tests/test_config.py | Python | mit | 1,247 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Admin SDK Directory API."""
import unittest
from directory_api_users_test_base import DirectoryApiUsersTestBase
from utils import admin_api_tool_errors
class DirectoryApiDeleteUserTest(DirectoryApiUsersTestBase):
"""Tests api's used by rm_user.py."""
def setUp(self):
"""Need users to simulate user actions."""
super(DirectoryApiDeleteUserTest, self).setUp()
self._user_email = 'nratchitt@%s' % self.primary_domain
self._unknown_user_email = 'zzzzz@%s' % self.primary_domain
self._unknown_user_email = 'nratchitt@%s' % self.unknown_domain
self._first_name = 'Nurse'
self._last_name = 'Ratchitt'
self._password = 'Google123'
self.test_users_manager.AddTestUser(self._first_name, self._last_name,
self._user_email)
def tearDown(self):
"""Clean up simulated users."""
self.test_users_manager.DeleteTestUser(self._user_email)
def testCanDeleteExistingDomainUser(self):
self.assertIsNone(
self._api_wrapper.DeleteDomainUser(self._user_email, verify=True))
def testDeleteUnknownDomainUserRaises(self):
self.assertRaises(
admin_api_tool_errors.AdminAPIToolUserError,
self._api_wrapper.DeleteDomainUser, self._unknown_user_email,
verify=True)
if __name__ == '__main__':
unittest.main()
| google/gfw-toolkit | toolkit/tests/directory_api_users_delete_user_test.py | Python | apache-2.0 | 1,930 |
# -*- coding: utf-8 -*-
# Copyright 2016 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Fleet Work Order Cargo",
"version": "8.0.2.0.0",
"category": "Fleet",
"website": "https://opensynergy-indonesia.com/",
"author": "OpenSynergy Indonesia",
"license": "AGPL-3",
"installable": True,
"depends": [
"fleet_work_order",
"fleet_vehicle_cargo",
"stock_shipment_management",
],
"data": [
"views/fleet_work_order_views.xml",
"views/shipment_plan_views.xml",
],
}
| open-synergy/opensynid-fleet | fleet_work_order_cargo/__openerp__.py | Python | agpl-3.0 | 590 |
"""Adds a simulated sensor."""
from datetime import datetime
import math
from random import Random
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
CONF_AMP = "amplitude"
CONF_FWHM = "spread"
CONF_MEAN = "mean"
CONF_PERIOD = "period"
CONF_PHASE = "phase"
CONF_SEED = "seed"
CONF_UNIT = "unit"
CONF_RELATIVE_TO_EPOCH = "relative_to_epoch"
DEFAULT_AMP = 1
DEFAULT_FWHM = 0
DEFAULT_MEAN = 0
DEFAULT_NAME = "simulated"
DEFAULT_PERIOD = 60
DEFAULT_PHASE = 0
DEFAULT_SEED = 999
DEFAULT_UNIT = "value"
DEFAULT_RELATIVE_TO_EPOCH = True
ICON = "mdi:chart-line"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_AMP, default=DEFAULT_AMP): vol.Coerce(float),
vol.Optional(CONF_FWHM, default=DEFAULT_FWHM): vol.Coerce(float),
vol.Optional(CONF_MEAN, default=DEFAULT_MEAN): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PERIOD, default=DEFAULT_PERIOD): cv.positive_int,
vol.Optional(CONF_PHASE, default=DEFAULT_PHASE): vol.Coerce(float),
vol.Optional(CONF_SEED, default=DEFAULT_SEED): cv.positive_int,
vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): cv.string,
vol.Optional(
CONF_RELATIVE_TO_EPOCH, default=DEFAULT_RELATIVE_TO_EPOCH
): cv.boolean,
}
)
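# Example configuration.yaml entry (illustrative):
#   sensor:
#     - platform: simulated
#       name: simulated_sine
#       amplitude: 5
#       period: 120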
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the simulated sensor."""
name = config.get(CONF_NAME)
unit = config.get(CONF_UNIT)
amp = config.get(CONF_AMP)
mean = config.get(CONF_MEAN)
period = config.get(CONF_PERIOD)
phase = config.get(CONF_PHASE)
fwhm = config.get(CONF_FWHM)
seed = config.get(CONF_SEED)
relative_to_epoch = config.get(CONF_RELATIVE_TO_EPOCH)
sensor = SimulatedSensor(
name, unit, amp, mean, period, phase, fwhm, seed, relative_to_epoch
)
add_entities([sensor], True)
class SimulatedSensor(Entity):
"""Class for simulated sensor."""
def __init__(
self, name, unit, amp, mean, period, phase, fwhm, seed, relative_to_epoch
):
"""Init the class."""
self._name = name
self._unit = unit
self._amp = amp
self._mean = mean
self._period = period
self._phase = phase # phase in degrees
self._fwhm = fwhm
self._seed = seed
self._random = Random(seed) # A local seeded Random
self._start_time = (
datetime(1970, 1, 1, tzinfo=dt_util.UTC)
if relative_to_epoch
else dt_util.utcnow()
)
self._relative_to_epoch = relative_to_epoch
self._state = None
def time_delta(self):
"""Return the time delta."""
dt0 = self._start_time
dt1 = dt_util.utcnow()
return dt1 - dt0
def signal_calc(self):
"""Calculate the signal."""
mean = self._mean
amp = self._amp
        time_delta = self.time_delta().total_seconds() * 1e6  # to microseconds
        period = self._period * 1e6  # to microseconds
fwhm = self._fwhm / 2
phase = math.radians(self._phase)
if period == 0:
periodic = 0
else:
periodic = amp * (math.sin((2 * math.pi * time_delta / period) + phase))
noise = self._random.gauss(mu=0, sigma=fwhm)
return round(mean + periodic + noise, 3)
async def async_update(self):
"""Update the sensor."""
self._state = self.signal_calc()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
return {
"amplitude": self._amp,
"mean": self._mean,
"period": self._period,
"phase": self._phase,
"spread": self._fwhm,
"seed": self._seed,
"relative_to_epoch": self._relative_to_epoch,
}
| sdague/home-assistant | homeassistant/components/simulated/sensor.py | Python | apache-2.0 | 4,535 |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
#connect to a local machine for debugging
#url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
#proxy = AuthServiceProxy(url)
#proxy.url = url # store URL on proxy for info
#self.nodes.append(proxy)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
#########################################
# sendrawtransaction with missing input #
#########################################
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransaction(rawtx)
errorString = ""
try:
rawtx = self.nodes[2].sendrawtransaction(rawtx['hex'])
except JSONRPCException,e:
errorString = e.error['message']
assert("Missing inputs" in errorString)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
addr3Obj = self.nodes[2].validateaddress(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
sPK = rawTx['vout'][0]['scriptPubKey']['hex']
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
        #THIS IS AN INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT TOWARDS THE BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
if __name__ == '__main__':
RawTransactionsTest().main()
| goku1997/bitcoin | qa/rpc-tests/rawtransactions.py | Python | mit | 5,874 |
from distutils.core import setup
setup(
name='tgboost',
version='1.0',
description='tiny gradient boosting tree',
author='wepon',
author_email='[email protected]',
url='http://wepon.me',
packages=['tgboost'],
package_data={'tgboost': ['tgboost.jar']},
package_dir={'tgboost': 'tgboost'},
)
| wepe/tgboost | python-package/setup.py | Python | mit | 326 |
from git.config import SectionConstraint
from git.util import join_path
from git.exc import GitCommandError
from .symbolic import SymbolicReference
from .reference import Reference
__all__ = ["HEAD", "Head"]
class HEAD(SymbolicReference):
"""Special case of a Symbolic Reference as it represents the repository's
HEAD reference."""
_HEAD_NAME = 'HEAD'
_ORIG_HEAD_NAME = 'ORIG_HEAD'
__slots__ = tuple()
def __init__(self, repo, path=_HEAD_NAME):
if path != self._HEAD_NAME:
raise ValueError("HEAD instance must point to %r, got %r" % (self._HEAD_NAME, path))
super(HEAD, self).__init__(repo, path)
def orig_head(self):
"""
:return: SymbolicReference pointing at the ORIG_HEAD, which is maintained
to contain the previous value of HEAD"""
return SymbolicReference(self.repo, self._ORIG_HEAD_NAME)
def reset(self, commit='HEAD', index=True, working_tree=False,
paths=None, **kwargs):
"""Reset our HEAD to the given commit optionally synchronizing
the index and working tree. The reference we refer to will be set to
commit as well.
:param commit:
Commit object, Reference Object or string identifying a revision we
should reset HEAD to.
:param index:
If True, the index will be set to match the given commit. Otherwise
it will not be touched.
:param working_tree:
If True, the working tree will be forcefully adjusted to match the given
commit, possibly overwriting uncommitted changes without warning.
If working_tree is True, index must be true as well
:param paths:
Single path or list of paths relative to the git root directory
that are to be reset. This allows to partially reset individual files.
:param kwargs:
Additional arguments passed to git-reset.
:return: self"""
mode = "--soft"
add_arg = None
if index:
mode = "--mixed"
# it appears, some git-versions declare mixed and paths deprecated
# see http://github.com/Byron/GitPython/issues#issue/2
if paths:
mode = None
# END special case
# END handle index
if working_tree:
mode = "--hard"
if not index:
raise ValueError("Cannot reset the working tree if the index is not reset as well")
# END working tree handling
if paths:
add_arg = "--"
# END nicely separate paths from rest
try:
self.repo.git.reset(mode, commit, add_arg, paths, **kwargs)
except GitCommandError as e:
# git nowadays may use 1 as status to indicate there are still unstaged
# modifications after the reset
if e.status != 1:
raise
# END handle exception
return self
class Head(Reference):
"""A Head is a named reference to a Commit. Every Head instance contains a name
and a Commit object.
Examples::
>>> repo = Repo("/path/to/repo")
>>> head = repo.heads[0]
>>> head.name
'master'
>>> head.commit
<git.Commit "1c09f116cbc2cb4100fb6935bb162daa4723f455">
>>> head.commit.hexsha
'1c09f116cbc2cb4100fb6935bb162daa4723f455'"""
_common_path_default = "refs/heads"
k_config_remote = "remote"
k_config_remote_ref = "merge" # branch to merge from remote
@classmethod
def delete(cls, repo, *heads, **kwargs):
"""Delete the given heads
:param force:
If True, the heads will be deleted even if they are not yet merged into
the main development stream.
Default False"""
force = kwargs.get("force", False)
flag = "-d"
if force:
flag = "-D"
repo.git.branch(flag, *heads)
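# Illustrative usage sketch (not part of the original module), assuming `repo`
# is a git.Repo instance containing a branch named 'feature':
#   Head.delete(repo, 'feature')              # git branch -d, refuses unmerged heads
#   Head.delete(repo, 'feature', force=True)  # git branch -D, deletes unconditionally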
def set_tracking_branch(self, remote_reference):
"""
Configure this branch to track the given remote reference. This will alter
this branch's configuration accordingly.
:param remote_reference: The remote reference to track or None to untrack
any references
:return: self"""
from .remote import RemoteReference
if remote_reference is not None and not isinstance(remote_reference, RemoteReference):
raise ValueError("Incorrect parameter type: %r" % remote_reference)
# END handle type
writer = self.config_writer()
if remote_reference is None:
writer.remove_option(self.k_config_remote)
writer.remove_option(self.k_config_remote_ref)
if len(writer.options()) == 0:
writer.remove_section()
# END handle remove section
else:
writer.set_value(self.k_config_remote, remote_reference.remote_name)
writer.set_value(self.k_config_remote_ref, Head.to_full_path(remote_reference.remote_head))
# END handle ref value
writer.release()
return self
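# Illustrative note (not part of the original module): after a successful call the
# repository configuration gains (or, when None is passed, loses) a section of
# roughly this shape:
#   [branch "master"]
#       remote = origin
#       merge = refs/heads/master
# A hedged usage sketch, assuming `repo` has a remote named 'origin':
#   repo.heads.master.set_tracking_branch(repo.remotes.origin.refs.master)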
def tracking_branch(self):
"""
:return: The remote_reference we are tracking, or None if we are
not a tracking branch"""
from .remote import RemoteReference
reader = self.config_reader()
if reader.has_option(self.k_config_remote) and reader.has_option(self.k_config_remote_ref):
ref = Head(self.repo, Head.to_full_path(reader.get_value(self.k_config_remote_ref)))
remote_refpath = RemoteReference.to_full_path(join_path(reader.get_value(self.k_config_remote), ref.name))
return RemoteReference(self.repo, remote_refpath)
# END handle have tracking branch
# we are not a tracking branch
return None
def rename(self, new_path, force=False):
"""Rename self to a new path
:param new_path:
Either a simple name or a path, e.g. new_name or features/new_name.
The prefix refs/heads is implied
:param force:
If True, the rename will succeed even if a head with the target name
already exists.
:return: self
:note: respects the ref log as git commands are used"""
flag = "-m"
if force:
flag = "-M"
self.repo.git.branch(flag, self, new_path)
self.path = "%s/%s" % (self._common_path_default, new_path)
return self
def checkout(self, force=False, **kwargs):
"""Checkout this head by setting the HEAD to this reference, by updating the index
to reflect the tree we point to and by updating the working tree to reflect
the latest index.
The command will fail if changed working tree files would be overwritten.
:param force:
If True, changes to the index and the working tree will be discarded.
If False, GitCommandError will be raised in that situation.
:param kwargs:
Additional keyword arguments to be passed to git checkout, e.g.
b='new_branch' to create a new branch at the given spot.
:return:
The active branch after the checkout operation, usually self unless
a new branch has been created.
:note:
By default only heads may be checked out; checking out anything else
leaves the HEAD detached, which is allowed and possible but remains
a special state that some tools might not be able to handle."""
kwargs['f'] = force
if kwargs['f'] is False:
kwargs.pop('f')
self.repo.git.checkout(self, **kwargs)
return self.repo.active_branch
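# Illustrative usage sketch (not part of the original module), assuming `repo`
# is a git.Repo instance:
#   repo.heads.master.checkout()                        # switch to master
#   new_branch = repo.heads.master.checkout(b='topic')  # create 'topic' at master and switch to it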
#{ Configuration
def _config_parser(self, read_only):
if read_only:
parser = self.repo.config_reader()
else:
parser = self.repo.config_writer()
# END handle parser instance
return SectionConstraint(parser, 'branch "%s"' % self.name)
def config_reader(self):
"""
:return: A configuration parser instance constrained to only read
this instance's values"""
return self._config_parser(read_only=True)
def config_writer(self):
"""
:return: A configuration writer instance with read and write access
to options of this head"""
return self._config_parser(read_only=False)
#} END configuration
| avinassh/GitPython | git/refs/head.py | Python | bsd-3-clause | 8,539 |
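A short, self-contained sketch tying the Head API above together. It assumes GitPython is installed and exercises Head.checkout, Head.rename and HEAD.reset on a throw-away repository; the directory and branch names are purely illustrative.

import os
import tempfile

from git import Repo

# Create a scratch repository with one commit so that HEAD and master exist.
repo_dir = tempfile.mkdtemp()
repo = Repo.init(repo_dir)
open(os.path.join(repo_dir, "readme.txt"), "w").close()
repo.index.add([os.path.join(repo_dir, "readme.txt")])
repo.index.commit("initial commit")

# Head objects: create, check out, rename.
feature = repo.create_head("feature")   # new Head at the current commit
feature.checkout()                      # Head.checkout -> 'feature' becomes the active branch
feature.rename("feature-final")         # Head.rename   -> git branch -m
print(repo.active_branch.name)          # 'feature-final'

# HEAD.reset with index=False and working_tree=False issues a soft reset;
# resetting to the current commit is a no-op and only illustrates the call.
repo.head.reset("HEAD", index=False, working_tree=False)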