repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
ResolveWang/algrithm_qa | arrandmatrix/q5.py | 1 | 1037 |
"""
Given an unsorted array arr, find the length of the shortest contiguous
subarray that must be sorted for the whole array to become sorted.
For example:
arr = [1, 5, 3, 4, 2, 6, 7] returns 4, because only [5, 3, 4, 2] needs sorting.
"""
class ShortestSubarr:
@classmethod
def get_shortest_subarr(cls, arr):
if not arr or len(arr) == 1:
return 0
length = len(arr)
max_index = -1
index = length - 1
min_value = arr[index]
while index >= 0:
if arr[index] <= min_value:
min_value = arr[index]
else:
max_index = index
index -= 1
if max_index == -1:
return 0
min_index = -1
index = 0
max_value = arr[index]
while index < length:
if arr[index] >= max_value:
max_value = arr[index]
else:
min_index = index
index += 1
return min_index - max_index + 1
if __name__ == '__main__':
print(ShortestSubarr.get_shortest_subarr([1, 5, 3, 4, 2, 6, 7]))
| mit | 4,220,207,234,826,846,000 | 22.512195 | 68 | 0.464174 | false |
onshape-public/onshape-clients | python/onshape_client/oas/models/bt_modifiable_entity_only_filter1593_all_of.py | 1 | 4814 |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTModifiableEntityOnlyFilter1593AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"modifiable_only": (bool,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"modifiable_only": "modifiableOnly", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_modifiable_entity_only_filter1593_all_of.BTModifiableEntityOnlyFilter1593AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
modifiable_only (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
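# Minimal usage sketch (not part of the generated file; the import path follows
# this file's location and the field values below are illustrative assumptions):
#
#     from onshape_client.oas.models.bt_modifiable_entity_only_filter1593_all_of import (
#         BTModifiableEntityOnlyFilter1593AllOf,
#     )
#     flt = BTModifiableEntityOnlyFilter1593AllOf(modifiable_only=True)
#     print(flt.modifiable_only)  # -> True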
| mit | 1,401,202,253,795,800,300 | 31.972603 | 121 | 0.579144 | false |
gongbudaizhe/bilib | demos/minion_interrogation/solution.py | 1 | 1039 |
# This problem can be really computationally expensive if we simply traverse
# all possible orderings (50! ~= 3.04e+64).
# Instead, observe that for two adjacent minions (m[i], m[i+1]) in the ordering,
# where t is the time the minion takes to complete the task and p is the
# probability that the minion tells the true answer, swapping them lowers the
# expected time cost whenever t[i]/p[i] > t[i+1]/p[i+1]; so the optimal ordering
# sorts minions by t/p in ascending order. If t[i]/p[i] == t[i+1]/p[i+1], the
# order does not affect the expected time, and this is where lexicographical
# (original index) order is used as the tie-breaker.
def compare(m1, m2):
r1 = m1[0] * m1[2] / float(m1[1])
r2 = m2[0] * m2[2] / float(m2[1])
if r1 == r2:
# lexicographical order
return m1[3] - m2[3]
else:
if r1 > r2:
return 1
else:
return -1
def answer(minions):
# add index
minions_aug = [val + [idx] for idx, val in enumerate(minions)]
return [m[3] for m in sorted(minions_aug, cmp=compare)]
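# Worked example (hypothetical input, not from the original problem statement):
# for minions = [[5, 1, 5], [10, 1, 2]], the ratios t/p are 5 / (1/5) = 25 and
# 10 / (1/2) = 20, so the second minion is questioned first and
# answer(minions) returns [1, 0].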
| mit | -1,202,021,335,986,941,400 | 38.961538 | 80 | 0.628489 | false |
yo-alan/personal | v/ui_editar.py | 1 | 11697 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/alan/dev/personal/v/ui_editar.ui'
#
# Created: Sat Jan 31 18:27:20 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Editar(object):
def setupUi(self, Editar):
Editar.setObjectName(_fromUtf8("Editar"))
Editar.resize(522, 324)
Editar.setModal(True)
self.verticalLayout = QtGui.QVBoxLayout(Editar)
self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.groupBox = QtGui.QGroupBox(Editar)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.formLayout_3 = QtGui.QFormLayout(self.groupBox)
self.formLayout_3.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_3.setObjectName(_fromUtf8("formLayout_3"))
self.lblNombre = QtGui.QLabel(self.groupBox)
self.lblNombre.setObjectName(_fromUtf8("lblNombre"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblNombre)
self.leNombre = QtGui.QLineEdit(self.groupBox)
self.leNombre.setObjectName(_fromUtf8("leNombre"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.FieldRole, self.leNombre)
self.lblApellido = QtGui.QLabel(self.groupBox)
self.lblApellido.setObjectName(_fromUtf8("lblApellido"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblApellido)
self.leApellido = QtGui.QLineEdit(self.groupBox)
self.leApellido.setObjectName(_fromUtf8("leApellido"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.FieldRole, self.leApellido)
self.lblFechaNacimiento = QtGui.QLabel(self.groupBox)
self.lblFechaNacimiento.setObjectName(_fromUtf8("lblFechaNacimiento"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.LabelRole, self.lblFechaNacimiento)
self.deFechaNacimiento = QtGui.QDateEdit(self.groupBox)
self.deFechaNacimiento.setObjectName(_fromUtf8("deFechaNacimiento"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.FieldRole, self.deFechaNacimiento)
self.lblGenero = QtGui.QLabel(self.groupBox)
self.lblGenero.setObjectName(_fromUtf8("lblGenero"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.LabelRole, self.lblGenero)
self.cmbGenero = QtGui.QComboBox(self.groupBox)
self.cmbGenero.setObjectName(_fromUtf8("cmbGenero"))
self.cmbGenero.addItem(_fromUtf8(""))
self.cmbGenero.addItem(_fromUtf8(""))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.FieldRole, self.cmbGenero)
self.lblCuil = QtGui.QLabel(self.groupBox)
self.lblCuil.setObjectName(_fromUtf8("lblCuil"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.LabelRole, self.lblCuil)
self.leCuil = QtGui.QLineEdit(self.groupBox)
self.leCuil.setMaxLength(13)
self.leCuil.setObjectName(_fromUtf8("leCuil"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.FieldRole, self.leCuil)
self.lblTelefono = QtGui.QLabel(self.groupBox)
self.lblTelefono.setObjectName(_fromUtf8("lblTelefono"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.LabelRole, self.lblTelefono)
self.leTelefono = QtGui.QLineEdit(self.groupBox)
self.leTelefono.setObjectName(_fromUtf8("leTelefono"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.FieldRole, self.leTelefono)
self.lblDomicilio = QtGui.QLabel(self.groupBox)
self.lblDomicilio.setObjectName(_fromUtf8("lblDomicilio"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.LabelRole, self.lblDomicilio)
self.leDomicilio = QtGui.QLineEdit(self.groupBox)
self.leDomicilio.setObjectName(_fromUtf8("leDomicilio"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.FieldRole, self.leDomicilio)
self.horizontalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(Editar)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.formLayout_2 = QtGui.QFormLayout(self.groupBox_2)
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.lblNroLegajo = QtGui.QLabel(self.groupBox_2)
self.lblNroLegajo.setObjectName(_fromUtf8("lblNroLegajo"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblNroLegajo)
self.sbNroLegajo = QtGui.QSpinBox(self.groupBox_2)
self.sbNroLegajo.setMinimum(1)
self.sbNroLegajo.setMaximum(1000)
self.sbNroLegajo.setObjectName(_fromUtf8("sbNroLegajo"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.sbNroLegajo)
self.lblFechaIngreso = QtGui.QLabel(self.groupBox_2)
self.lblFechaIngreso.setObjectName(_fromUtf8("lblFechaIngreso"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.lblFechaIngreso)
self.deFechaIngreso = QtGui.QDateEdit(self.groupBox_2)
self.deFechaIngreso.setObjectName(_fromUtf8("deFechaIngreso"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.deFechaIngreso)
self.lblRevista = QtGui.QLabel(self.groupBox_2)
self.lblRevista.setObjectName(_fromUtf8("lblRevista"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.lblRevista)
self.cmbRevista = QtGui.QComboBox(self.groupBox_2)
self.cmbRevista.setObjectName(_fromUtf8("cmbRevista"))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.cmbRevista)
self.lblCargo = QtGui.QLabel(self.groupBox_2)
self.lblCargo.setObjectName(_fromUtf8("lblCargo"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.lblCargo)
self.cmbCargo = QtGui.QComboBox(self.groupBox_2)
self.cmbCargo.setObjectName(_fromUtf8("cmbCargo"))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.cmbCargo)
self.lblNivel = QtGui.QLabel(self.groupBox_2)
self.lblNivel.setObjectName(_fromUtf8("lblNivel"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.LabelRole, self.lblNivel)
self.leNivel = QtGui.QLineEdit(self.groupBox_2)
self.leNivel.setObjectName(_fromUtf8("leNivel"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.FieldRole, self.leNivel)
self.horizontalLayout.addWidget(self.groupBox_2)
self.verticalLayout.addLayout(self.horizontalLayout)
self.buttonBox = QtGui.QDialogButtonBox(Editar)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Editar)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Editar.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Editar.reject)
QtCore.QMetaObject.connectSlotsByName(Editar)
def retranslateUi(self, Editar):
Editar.setWindowTitle(QtGui.QApplication.translate("Editar", "Editar empleado", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("Editar", "Datos personales", None, QtGui.QApplication.UnicodeUTF8))
self.lblNombre.setText(QtGui.QApplication.translate("Editar", "Nombre:", None, QtGui.QApplication.UnicodeUTF8))
self.lblApellido.setText(QtGui.QApplication.translate("Editar", "Apellido:", None, QtGui.QApplication.UnicodeUTF8))
self.lblFechaNacimiento.setText(QtGui.QApplication.translate("Editar", "F. Nacimiento:", None, QtGui.QApplication.UnicodeUTF8))
self.deFechaNacimiento.setDisplayFormat(QtGui.QApplication.translate("Editar", "dd/MM/yyyy", None, QtGui.QApplication.UnicodeUTF8))
self.lblGenero.setText(QtGui.QApplication.translate("Editar", "Género:", None, QtGui.QApplication.UnicodeUTF8))
self.cmbGenero.setItemText(0, QtGui.QApplication.translate("Editar", "Femenino", None, QtGui.QApplication.UnicodeUTF8))
self.cmbGenero.setItemText(1, QtGui.QApplication.translate("Editar", "Masculino", None, QtGui.QApplication.UnicodeUTF8))
self.lblCuil.setText(QtGui.QApplication.translate("Editar", "Cuil:", None, QtGui.QApplication.UnicodeUTF8))
self.lblTelefono.setText(QtGui.QApplication.translate("Editar", "Teléfono:", None, QtGui.QApplication.UnicodeUTF8))
self.lblDomicilio.setText(QtGui.QApplication.translate("Editar", "Domicilio:", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("Editar", "Datos laborales", None, QtGui.QApplication.UnicodeUTF8))
self.lblNroLegajo.setText(QtGui.QApplication.translate("Editar", "Nro. Legajo:", None, QtGui.QApplication.UnicodeUTF8))
self.lblFechaIngreso.setText(QtGui.QApplication.translate("Editar", "Ingreso:", None, QtGui.QApplication.UnicodeUTF8))
self.deFechaIngreso.setDisplayFormat(QtGui.QApplication.translate("Editar", "dd/MM/yyyy", None, QtGui.QApplication.UnicodeUTF8))
self.lblRevista.setText(QtGui.QApplication.translate("Editar", "Sit. de Revista:", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(0, QtGui.QApplication.translate("Editar", "Comisión", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(1, QtGui.QApplication.translate("Editar", "Pasantía", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(2, QtGui.QApplication.translate("Editar", "Permanente", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(3, QtGui.QApplication.translate("Editar", "Temporaria", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(4, QtGui.QApplication.translate("Editar", "Transitoria", None, QtGui.QApplication.UnicodeUTF8))
self.lblCargo.setText(QtGui.QApplication.translate("Editar", "Cargo:", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(0, QtGui.QApplication.translate("Editar", "Administrativo", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(1, QtGui.QApplication.translate("Editar", "Jerárquico", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(2, QtGui.QApplication.translate("Editar", "Obrero", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(3, QtGui.QApplication.translate("Editar", "Profesional", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(4, QtGui.QApplication.translate("Editar", "Servicio", None, QtGui.QApplication.UnicodeUTF8))
self.lblNivel.setText(QtGui.QApplication.translate("Editar", "Nivel:", None, QtGui.QApplication.UnicodeUTF8))
| mit | 4,544,980,707,808,152,000 | 68.595238 | 139 | 0.729644 | false |
soasme/wikisensei | wikisensei/wiki/migrations/0001_initial.py | 1 | 1769 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-12 08:39
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Version',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.IntegerField(default=0)),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-version'],
},
),
migrations.CreateModel(
name='Wiki',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('version', models.IntegerField(default=0)),
('privacy', models.IntegerField(default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='version',
name='wiki',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='versions', to='wiki.Wiki'),
),
]
| gpl-3.0 | -5,097,590,489,198,971,000 | 35.854167 | 122 | 0.568683 | false |
tchellomello/home-assistant | tests/components/homematicip_cloud/test_device.py | 1 | 8283 |
"""Common tests for HomematicIP devices."""
from homematicip.base.enums import EventType
from homeassistant.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from homeassistant.components.homematicip_cloud.hap import HomematicipHAP
from homeassistant.const import STATE_ON, STATE_UNAVAILABLE
from homeassistant.helpers import device_registry as dr, entity_registry as er
from .helper import (
HAPID,
HomeFactory,
async_manipulate_test_data,
get_and_check_entity_basics,
)
from tests.async_mock import patch
async def test_hmip_load_all_supported_devices(hass, default_mock_hap_factory):
"""Ensure that all supported devices could be loaded."""
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=None, test_groups=None
)
assert len(mock_hap.hmip_device_by_entity_id) == 191
async def test_hmip_remove_device(hass, default_mock_hap_factory):
"""Test Remove of hmip device."""
entity_id = "light.treppe_ch"
entity_name = "Treppe CH"
device_model = "HmIP-BSL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Treppe"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
device_registry = await dr.async_get_registry(hass)
entity_registry = await er.async_get_registry(hass)
pre_device_count = len(device_registry.devices)
pre_entity_count = len(entity_registry.entities)
pre_mapping_count = len(mock_hap.hmip_device_by_entity_id)
hmip_device.fire_remove_event()
await hass.async_block_till_done()
assert len(device_registry.devices) == pre_device_count - 1
assert len(entity_registry.entities) == pre_entity_count - 3
assert len(mock_hap.hmip_device_by_entity_id) == pre_mapping_count - 3
async def test_hmip_add_device(hass, default_mock_hap_factory, hmip_config_entry):
"""Test Remove of hmip device."""
entity_id = "light.treppe_ch"
entity_name = "Treppe CH"
device_model = "HmIP-BSL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Treppe"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
device_registry = await dr.async_get_registry(hass)
entity_registry = await er.async_get_registry(hass)
pre_device_count = len(device_registry.devices)
pre_entity_count = len(entity_registry.entities)
pre_mapping_count = len(mock_hap.hmip_device_by_entity_id)
hmip_device.fire_remove_event()
await hass.async_block_till_done()
assert len(device_registry.devices) == pre_device_count - 1
assert len(entity_registry.entities) == pre_entity_count - 3
assert len(mock_hap.hmip_device_by_entity_id) == pre_mapping_count - 3
reloaded_hap = HomematicipHAP(hass, hmip_config_entry)
with patch(
"homeassistant.components.homematicip_cloud.HomematicipHAP",
return_value=reloaded_hap,
), patch.object(reloaded_hap, "async_connect"), patch.object(
reloaded_hap, "get_hap", return_value=mock_hap.home
), patch(
"homeassistant.components.homematicip_cloud.hap.asyncio.sleep"
):
mock_hap.home.fire_create_event(event_type=EventType.DEVICE_ADDED)
await hass.async_block_till_done()
assert len(device_registry.devices) == pre_device_count
assert len(entity_registry.entities) == pre_entity_count
new_hap = hass.data[HMIPC_DOMAIN][HAPID]
assert len(new_hap.hmip_device_by_entity_id) == pre_mapping_count
async def test_hmip_remove_group(hass, default_mock_hap_factory):
"""Test Remove of hmip group."""
entity_id = "switch.strom_group"
entity_name = "Strom Group"
device_model = None
mock_hap = await default_mock_hap_factory.async_get_mock_hap(test_groups=["Strom"])
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
device_registry = await dr.async_get_registry(hass)
entity_registry = await er.async_get_registry(hass)
pre_device_count = len(device_registry.devices)
pre_entity_count = len(entity_registry.entities)
pre_mapping_count = len(mock_hap.hmip_device_by_entity_id)
hmip_device.fire_remove_event()
await hass.async_block_till_done()
assert len(device_registry.devices) == pre_device_count
assert len(entity_registry.entities) == pre_entity_count - 1
assert len(mock_hap.hmip_device_by_entity_id) == pre_mapping_count - 1
async def test_all_devices_unavailable_when_hap_not_connected(
hass, default_mock_hap_factory
):
"""Test make all devices unavaulable when hap is not connected."""
entity_id = "light.treppe_ch"
entity_name = "Treppe CH"
device_model = "HmIP-BSL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Treppe"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
assert mock_hap.home.connected
await async_manipulate_test_data(hass, mock_hap.home, "connected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_UNAVAILABLE
async def test_hap_reconnected(hass, default_mock_hap_factory):
"""Test reconnect hap."""
entity_id = "light.treppe_ch"
entity_name = "Treppe CH"
device_model = "HmIP-BSL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Treppe"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
assert mock_hap.home.connected
await async_manipulate_test_data(hass, mock_hap.home, "connected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_UNAVAILABLE
mock_hap._accesspoint_connected = False # pylint: disable=protected-access
await async_manipulate_test_data(hass, mock_hap.home, "connected", True)
await hass.async_block_till_done()
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hap_with_name(hass, mock_connection, hmip_config_entry):
"""Test hap with name."""
home_name = "TestName"
entity_id = f"light.{home_name.lower()}_treppe_ch"
entity_name = f"{home_name} Treppe CH"
device_model = "HmIP-BSL"
hmip_config_entry.data = {**hmip_config_entry.data, "name": home_name}
mock_hap = await HomeFactory(
hass, mock_connection, hmip_config_entry
).async_get_mock_hap(test_devices=["Treppe"])
assert mock_hap
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert hmip_device
assert ha_state.state == STATE_ON
assert ha_state.attributes["friendly_name"] == entity_name
async def test_hmip_reset_energy_counter_services(hass, default_mock_hap_factory):
"""Test reset_energy_counter service."""
entity_id = "switch.pc"
entity_name = "Pc"
device_model = "HMIP-PSM"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state
await hass.services.async_call(
"homematicip_cloud",
"reset_energy_counter",
{"entity_id": "switch.pc"},
blocking=True,
)
assert hmip_device.mock_calls[-1][0] == "reset_energy_counter"
assert len(hmip_device._connection.mock_calls) == 2 # pylint: disable=W0212
await hass.services.async_call(
"homematicip_cloud", "reset_energy_counter", {"entity_id": "all"}, blocking=True
)
assert hmip_device.mock_calls[-1][0] == "reset_energy_counter"
assert len(hmip_device._connection.mock_calls) == 4 # pylint: disable=W0212
| apache-2.0 | -8,402,342,774,571,276,000 | 33.227273 | 88 | 0.683569 | false |
yeming233/rally | rally/plugins/openstack/scenarios/cinder/volume_backups.py | 1 | 2728 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
from rally.task import validation
"""Scenarios for Cinder Volume Backup."""
@validation.add("number", param_name="size", minval=1, integer_only=True)
@validation.add("restricted_parameters", param_names=["name", "display_name"],
subdict="create_volume_kwargs")
@validation.add("restricted_parameters", param_names="name",
subdict="create_backup_kwargs")
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_cinder_services", services="cinder-backup")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup": ["cinder"]},
name="CinderVolumeBackups."
"create_incremental_volume_backup", platform="openstack")
class CreateIncrementalVolumeBackup(cinder_utils.CinderBasic):
def run(self, size, do_delete=True, create_volume_kwargs=None,
create_backup_kwargs=None):
"""Create a incremental volume backup.
The scenario first create a volume, the create a backup, the backup
is full backup. Because Incremental backup must be based on the
full backup. finally create a incremental backup.
:param size: volume size in GB
:param do_delete: deletes backup and volume after creating if True
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self.cinder.create_volume(size, **create_volume_kwargs)
backup1 = self.cinder.create_backup(volume.id, **create_backup_kwargs)
backup2 = self.cinder.create_backup(volume.id, incremental=True)
if do_delete:
self.cinder.delete_backup(backup2)
self.cinder.delete_backup(backup1)
self.cinder.delete_volume(volume)
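        # Illustrative rally task entry for this scenario (hypothetical values,
        # shown only as a comment):
        #
        #     {"CinderVolumeBackups.create_incremental_volume_backup": [
        #         {"args": {"size": 1, "do_delete": true},
        #          "runner": {"type": "constant", "times": 2, "concurrency": 2}}]}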
| apache-2.0 | -3,003,851,158,195,206,000 | 44.466667 | 78 | 0.70088 | false |
MapQuest/mapquest-osm-server | src/python/frontend/maphandler.py | 1 | 7622 |
# Copyright (c) 2011 AOL Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
## Support retrieval of the map data in a bounding box.
import geohash
import tornado.web
from lxml import etree as ET
import apiserver.const as C
from apiserver.osmelement import encode_coordinate, new_osm_response
from util import filter_references, response_to_xml
def _filter_in_bbox(bbox, geodocs):
"Return the list of nodes that fall into the given bounding box."
w,s,e,n = map(encode_coordinate, bbox)
nodeset = set()
for gd in geodocs:
for (nid, lat, lon) in gd.get_node_info():
if w <= lon < e and s <= lat < n:
nodeset.add(nid)
return nodeset
class MapHandler(tornado.web.RequestHandler):
"Handle requests for the /map API."
def initialize(self, cfg, datastore):
self.datastore = datastore
self.precision = cfg.getint(C.DATASTORE, C.GEOHASH_LENGTH)
def get(self, *args, **kwargs):
'''Service a GET request to the '/map' URI.
The 'bbox' parameter contains 4 coordinates "l" (w), "b" (s),
"r" (e) and "t" (n).'''
# Sanity check the input.
bbox_arg = self.get_argument('bbox', None)
if not bbox_arg:
raise tornado.web.HTTPError(400) # Bad Syntax
bbox = bbox_arg.split(',')
if len(bbox) != 4:
raise tornado.web.HTTPError(400)
try:
w,s,e,n = map(float, bbox)
except ValueError:
raise tornado.web.HTTPError(400)
# Check the "l,b,r,t" coordinates passed in for sanity.
if w < C.LON_MIN or w > C.LON_MAX or \
e < C.LON_MIN or e > C.LON_MAX or \
s < C.LAT_MIN or s > C.LAT_MAX or \
n < C.LAT_MIN or n > C.LAT_MAX or \
n < s or e < w:
raise tornado.web.HTTPError(400)
nodelist, ways, relations = self.handle_map(bbox)
response = self.build_bbox_response(nodelist, ways, relations, bbox)
self.set_header(C.CONTENT_TYPE, C.TEXT_XML)
self.write(response_to_xml(response))
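        # Example request this handler serves (illustrative URL only):
        #   GET /map?bbox=-0.10,51.50,0.00,51.55
        # where bbox is "left,bottom,right,top", i.e. "w,s,e,n" in decimal degrees.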
def build_bbox_response(self, nodes, ways, relations, bbox):
"""Build an OSM response for the query."""
# Create a new response element.
osm = new_osm_response()
# Add a <bounds> element.
bb = ET.SubElement(osm, C.BOUNDS)
(bb.attrib[C.MINLON], bb.attrib[C.MINLAT],
bb.attrib[C.MAXLON], bb.attrib[C.MAXLAT]) = map(str, bbox)
# Add nodes, ways and relations in that order.
for n in nodes:
n.build_response(osm)
for w in ways:
w.build_response(osm)
for r in relations:
r.build_response(osm)
return osm
def handle_map(self, bbox):
"""Implementation of the /map API.
Parameters:
bbox -- Bounding box coordinates.
"""
nodelist = []
relations = []
ways = []
# This implementation follows the current implementation of
# the API server at api.openstreetmap.org (the 'rails' port).
# Look up the geo coded documents covering the desired bbox.
gckeys = self.get_geocodes(bbox)
geodocs = self.datastore.fetch_keys(C.GEODOC, gckeys)
# Step 1: Get the list of nodes contained in the given
# bounding box.
nodeset = _filter_in_bbox(bbox,
[gd for (st, gd) in geodocs if st])
if len(nodeset) == 0:
return (nodelist, ways, relations)
nodelist = [z for (st, z) in self.datastore.fetch_keys(
C.NODE, [n for n in nodeset]) if st]
# Step 2: Retrieve all ways that reference at least one node
# in the given bounding box.
wayset = filter_references(C.WAY, nodelist)
# Step 3: Retrieve any additional nodes referenced by the ways
# retrieved.
waynodeset = set()
for (st,w) in self.datastore.fetch_keys(C.WAY, [w for w in wayset]):
if st:
ways.append(w)
waynodeset.update(w.get_node_ids())
extranodeset = waynodeset - nodeset
nodelist.extend([n for (st,n) in
self.datastore.fetch_keys(C.NODE,
[n for n in extranodeset])
if st])
nodeset = nodeset | extranodeset
# Step 4: Retrieve the relations associated with these nodes.
# ... all relations that reference nodes being returned.
relset = filter_references(C.RELATION, nodelist)
# ... and relations that reference one of the ways in the wayset.
relset.update(filter_references(C.RELATION, ways))
# ... retrieve relations from the data store.
relations = [xr for (st,xr) in
self.datastore.fetch_keys(C.RELATION,
[r for r in relset])
if st]
# ... and relations referenced by existing relations
# (one-pass only).
extrarelset = filter_references(C.RELATION, relations)
newrelset = extrarelset - relset
newrels = [nr for (st, nr) in
self.datastore.fetch_keys(C.RELATION,
[r for r in newrelset])
if st]
relations.extend(newrels)
return (nodelist, ways, relations)
def get_geocodes(self, bbox):
"""Return a list of keys covering a given area.
Parameters:
bbox -- Bounding box of the desired region.
"""
# TODO: Make this more efficient for sparse areas of the map.
w, s, e, n = map(float, bbox)
n = min(C.MAXGHLAT, n) # work around a geohash library
s = min(C.MAXGHLAT, s) # limitation
assert(w <= e and s <= n)
gcset = set()
gc = geohash.encode(s, w, self.precision)
bl = geohash.bbox(gc) # Box containing point (s,w).
s_ = bl['s'];
while s_ < n: # Step south to north.
w_ = bl['w']
gc = geohash.encode(s_, w_, self.precision)
bb_sn = geohash.bbox(gc) # bounding box in S->N direction
while w_ < e: # Step west to east.
gcset.add(gc)
bb_we = geohash.bbox(gc) # in W->E direction
w_ = bb_we['e']
gc = geohash.encode(s_, w_, self.precision)
s_ = bb_sn['n']
assert(len(gcset) > 0)
return [gc for gc in gcset]
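        # Rough illustration (approximate figures, not derived from this code):
        # with a geohash length of 5 each cell covers about 4.9 km x 4.9 km, so a
        # bounding box of ~0.1 deg x 0.1 deg is covered by only a handful of keys,
        # while longer geohash lengths shrink the cells and grow the key list quickly.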
| mit | 5,400,930,514,382,168,000 | 32.875556 | 77 | 0.581737 | false |
matus-stehlik/glowing-batman | problems/migrations/0005_auto__add_field_usersolution_score.py | 1 | 16422 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserSolution.score'
db.add_column(u'problems_usersolution', 'score',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding M2M table for field corrected_by on 'UserSolution'
m2m_table_name = db.shorten_name(u'problems_usersolution_corrected_by')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('usersolution', models.ForeignKey(orm[u'problems.usersolution'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['usersolution_id', 'user_id'])
def backwards(self, orm):
# Deleting field 'UserSolution.score'
db.delete_column(u'problems_usersolution', 'score')
# Removing M2M table for field corrected_by on 'UserSolution'
db.delete_table(db.shorten_name(u'problems_usersolution_corrected_by'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'competitions.competition': {
'Meta': {'ordering': "['name']", 'object_name': 'Competition'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competitions_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competitions_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'organizer_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'events.event': {
'Meta': {'ordering': "['-start_time', 'end_time']", 'object_name': 'Event'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'registered_org': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'organized_event_set'", 'symmetrical': 'False', 'through': u"orm['events.EventOrgRegistration']", 'to': u"orm['auth.User']"}),
'registered_user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'through': u"orm['events.EventUserRegistration']", 'symmetrical': 'False'}),
'registration_end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {})
},
u'events.eventorgregistration': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(('event', 'organizer'),)", 'object_name': 'EventOrgRegistration'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'organizer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'events.eventuserregistration': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(('event', 'user'),)", 'object_name': 'EventUserRegistration'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'leaflets.leaflet': {
'Meta': {'ordering': "['competition', '-year', 'issue']", 'unique_together': "(('competition', 'year', 'issue'),)", 'object_name': 'Leaflet'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.IntegerField', [], {}),
'leaflet': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'problems.orgsolution': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'OrgSolution'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organizer solutions_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organizer solutions_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'organizer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.Problem']"})
},
u'problems.problem': {
'Meta': {'object_name': 'Problem'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'problems_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.ProblemCategory']"}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'problems_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'severity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.ProblemSeverity']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'problems.problemcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'ProblemCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'problems.probleminset': {
'Meta': {'ordering': "['position']", 'unique_together': "(['problem', 'problemset'],)", 'object_name': 'ProblemInSet'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.Problem']"}),
'problemset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.ProblemSet']"})
},
u'problems.problemset': {
'Meta': {'object_name': 'ProblemSet'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sets_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Competition']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'leaflet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['leaflets.Leaflet']", 'null': 'True', 'blank': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sets_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'problems': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['problems.Problem']", 'through': u"orm['problems.ProblemInSet']", 'symmetrical': 'False'})
},
u'problems.problemseverity': {
'Meta': {'ordering': "['level']", 'object_name': 'ProblemSeverity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'problems.usersolution': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(['user', 'problem'],)", 'object_name': 'UserSolution'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user solutions_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'corrected_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'usersolutions_corrected_set'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user solutions_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.Problem']"}),
'score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'solution': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
    complete_apps = ['problems']
| mit | 1,788,266,054,158,258,400 | 82.790816 | 230 | 0.563512 | false |
jiangxb1987/spark | python/pyspark/ml/stat.py | 1 | 16492 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, SparkContext
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.wrapper import JavaWrapper, _jvm
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.functions import lit
class ChiSquareTest(object):
"""
Conduct Pearson's independence test for every feature against the label. For each feature,
the (feature, label) pairs are converted into a contingency matrix for which the Chi-squared
statistic is computed. All label and feature values must be categorical.
The null hypothesis is that the occurrence of the outcomes is statistically independent.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def test(dataset, featuresCol, labelCol):
"""
Perform a Pearson's independence test using dataset.
:param dataset:
DataFrame of categorical labels and categorical features.
Real-valued features will be treated as categorical for each distinct value.
:param featuresCol:
Name of features column in dataset, of type `Vector` (`VectorUDT`).
:param labelCol:
Name of label column in dataset, of any numerical type.
:return:
DataFrame containing the test result for every feature against the label.
This DataFrame will contain a single Row with the following fields:
- `pValues: Vector`
- `degreesOfFreedom: Array[Int]`
- `statistics: Vector`
Each of these fields has one value per feature.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import ChiSquareTest
>>> dataset = [[0, Vectors.dense([0, 0, 1])],
... [0, Vectors.dense([1, 0, 1])],
... [1, Vectors.dense([2, 1, 1])],
... [1, Vectors.dense([3, 1, 1])]]
>>> dataset = spark.createDataFrame(dataset, ["label", "features"])
>>> chiSqResult = ChiSquareTest.test(dataset, 'features', 'label')
>>> chiSqResult.select("degreesOfFreedom").collect()[0]
Row(degreesOfFreedom=[3, 1, 0])
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.ChiSquareTest
args = [_py2java(sc, arg) for arg in (dataset, featuresCol, labelCol)]
return _java2py(sc, javaTestObj.test(*args))
class Correlation(object):
"""
Compute the correlation matrix for the input dataset of Vectors using the specified method.
Methods currently supported: `pearson` (default), `spearman`.
.. note:: For Spearman, a rank correlation, we need to create an RDD[Double] for each column
and sort it in order to retrieve the ranks and then join the columns back into an RDD[Vector],
which is fairly costly. Cache the input Dataset before calling corr with `method = 'spearman'`
to avoid recomputing the common lineage.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def corr(dataset, column, method="pearson"):
"""
Compute the correlation matrix with specified method using dataset.
:param dataset:
A Dataset or a DataFrame.
:param column:
The name of the column of vectors for which the correlation coefficient needs
to be computed. This must be a column of the dataset, and it must contain
Vector objects.
:param method:
String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`.
:return:
A DataFrame that contains the correlation matrix of the column of vectors. This
DataFrame contains a single row and a single column of name
'$METHODNAME($COLUMN)'.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import Correlation
>>> dataset = [[Vectors.dense([1, 0, 0, -2])],
... [Vectors.dense([4, 5, 0, 3])],
... [Vectors.dense([6, 7, 0, 8])],
... [Vectors.dense([9, 0, 0, 1])]]
>>> dataset = spark.createDataFrame(dataset, ['features'])
>>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...],
[ 0.0556..., 1. , NaN, 0.9135...],
[ NaN, NaN, 1. , NaN],
[ 0.4004..., 0.9135..., NaN, 1. ]])
>>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ],
[ 0.1054..., 1. , NaN, 0.9486... ],
[ NaN, NaN, 1. , NaN],
[ 0.4 , 0.9486... , NaN, 1. ]])
"""
sc = SparkContext._active_spark_context
javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation
args = [_py2java(sc, arg) for arg in (dataset, column, method)]
return _java2py(sc, javaCorrObj.corr(*args))
class KolmogorovSmirnovTest(object):
"""
Conduct the two-sided Kolmogorov Smirnov (KS) test for data sampled from a continuous
distribution.
By comparing the largest difference between the empirical cumulative
    distribution of the sample data and the theoretical distribution we can provide a test for
    the null hypothesis that the sample data comes from that theoretical distribution.
.. versionadded:: 2.4.0
"""
@staticmethod
@since("2.4.0")
def test(dataset, sampleCol, distName, *params):
"""
Conduct a one-sample, two-sided Kolmogorov-Smirnov test for probability distribution
equality. Currently supports the normal distribution, taking as parameters the mean and
standard deviation.
:param dataset:
a Dataset or a DataFrame containing the sample of data to test.
:param sampleCol:
Name of sample column in dataset, of any numerical type.
:param distName:
a `string` name for a theoretical distribution, currently only support "norm".
:param params:
a list of `Double` values specifying the parameters to be used for the theoretical
distribution. For "norm" distribution, the parameters includes mean and variance.
:return:
A DataFrame that contains the Kolmogorov-Smirnov test result for the input sampled data.
This DataFrame will contain a single Row with the following fields:
- `pValue: Double`
- `statistic: Double`
>>> from pyspark.ml.stat import KolmogorovSmirnovTest
>>> dataset = [[-1.0], [0.0], [1.0]]
>>> dataset = spark.createDataFrame(dataset, ['sample'])
>>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 0.0, 1.0).first()
>>> round(ksResult.pValue, 3)
1.0
>>> round(ksResult.statistic, 3)
0.175
>>> dataset = [[2.0], [3.0], [4.0]]
>>> dataset = spark.createDataFrame(dataset, ['sample'])
>>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 3.0, 1.0).first()
>>> round(ksResult.pValue, 3)
1.0
>>> round(ksResult.statistic, 3)
0.175
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.KolmogorovSmirnovTest
dataset = _py2java(sc, dataset)
params = [float(param) for param in params]
return _java2py(sc, javaTestObj.test(dataset, sampleCol, distName,
_jvm().PythonUtils.toSeq(params)))
class Summarizer(object):
"""
Tools for vectorized statistics on MLlib Vectors.
The methods in this package provide various statistics for Vectors contained inside DataFrames.
This class lets users pick the statistics they would like to extract for a given column.
>>> from pyspark.ml.stat import Summarizer
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> summarizer = Summarizer.metrics("mean", "count")
>>> df = sc.parallelize([Row(weight=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(weight=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> df.select(summarizer.summary(df.features, df.weight)).show(truncate=False)
+-----------------------------------+
|aggregate_metrics(features, weight)|
+-----------------------------------+
|[[1.0,1.0,1.0], 1] |
+-----------------------------------+
<BLANKLINE>
>>> df.select(summarizer.summary(df.features)).show(truncate=False)
+--------------------------------+
|aggregate_metrics(features, 1.0)|
+--------------------------------+
|[[1.0,1.5,2.0], 2] |
+--------------------------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features, df.weight)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.0,1.0] |
+--------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.5,2.0] |
+--------------+
<BLANKLINE>
.. versionadded:: 2.4.0
"""
@staticmethod
@since("2.4.0")
def mean(col, weightCol=None):
"""
return a column of mean summary
"""
return Summarizer._get_single_metric(col, weightCol, "mean")
@staticmethod
@since("3.0.0")
def sum(col, weightCol=None):
"""
return a column of sum summary
"""
return Summarizer._get_single_metric(col, weightCol, "sum")
@staticmethod
@since("2.4.0")
def variance(col, weightCol=None):
"""
return a column of variance summary
"""
return Summarizer._get_single_metric(col, weightCol, "variance")
@staticmethod
@since("3.0.0")
def std(col, weightCol=None):
"""
return a column of std summary
"""
return Summarizer._get_single_metric(col, weightCol, "std")
@staticmethod
@since("2.4.0")
def count(col, weightCol=None):
"""
return a column of count summary
"""
return Summarizer._get_single_metric(col, weightCol, "count")
@staticmethod
@since("2.4.0")
def numNonZeros(col, weightCol=None):
"""
return a column of numNonZero summary
"""
return Summarizer._get_single_metric(col, weightCol, "numNonZeros")
@staticmethod
@since("2.4.0")
def max(col, weightCol=None):
"""
return a column of max summary
"""
return Summarizer._get_single_metric(col, weightCol, "max")
@staticmethod
@since("2.4.0")
def min(col, weightCol=None):
"""
return a column of min summary
"""
return Summarizer._get_single_metric(col, weightCol, "min")
@staticmethod
@since("2.4.0")
def normL1(col, weightCol=None):
"""
return a column of normL1 summary
"""
return Summarizer._get_single_metric(col, weightCol, "normL1")
@staticmethod
@since("2.4.0")
def normL2(col, weightCol=None):
"""
return a column of normL2 summary
"""
return Summarizer._get_single_metric(col, weightCol, "normL2")
@staticmethod
def _check_param(featuresCol, weightCol):
if weightCol is None:
weightCol = lit(1.0)
if not isinstance(featuresCol, Column) or not isinstance(weightCol, Column):
raise TypeError("featureCol and weightCol should be a Column")
return featuresCol, weightCol
@staticmethod
def _get_single_metric(col, weightCol, metric):
col, weightCol = Summarizer._check_param(col, weightCol)
return Column(JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer." + metric,
col._jc, weightCol._jc))
@staticmethod
@since("2.4.0")
def metrics(*metrics):
"""
        Given a list of metrics, provides a builder that in turn computes metrics from a column.
See the documentation of [[Summarizer]] for an example.
The following metrics are accepted (case sensitive):
- mean: a vector that contains the coefficient-wise mean.
- sum: a vector that contains the coefficient-wise sum.
         - variance: a vector that contains the coefficient-wise variance.
         - std: a vector that contains the coefficient-wise standard deviation.
         - count: the count of all vectors seen.
         - numNonzeros: a vector with the number of non-zeros for each coefficient.
- max: the maximum for each coefficient.
- min: the minimum for each coefficient.
- normL2: the Euclidean norm for each coefficient.
- normL1: the L1 norm of each coefficient (sum of the absolute values).
:param metrics:
metrics that can be provided.
:return:
an object of :py:class:`pyspark.ml.stat.SummaryBuilder`
        Note: Currently, the performance of this interface is about 2x~3x slower than using the RDD
interface.
"""
sc = SparkContext._active_spark_context
js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics",
_to_seq(sc, metrics))
return SummaryBuilder(js)
class SummaryBuilder(JavaWrapper):
"""
A builder object that provides summary statistics about a given column.
Users should not directly create such builders, but instead use one of the methods in
:py:class:`pyspark.ml.stat.Summarizer`
.. versionadded:: 2.4.0
"""
def __init__(self, jSummaryBuilder):
super(SummaryBuilder, self).__init__(jSummaryBuilder)
@since("2.4.0")
def summary(self, featuresCol, weightCol=None):
"""
Returns an aggregate object that contains the summary of the column with the requested
metrics.
:param featuresCol:
a column that contains features Vector object.
:param weightCol:
a column that contains weight value. Default weight is 1.0.
:return:
an aggregate column that contains the statistics. The exact content of this
structure is determined during the creation of the builder.
"""
featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol)
return Column(self._java_obj.summary(featuresCol._jc, weightCol._jc))
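# Illustrative helper (editorial addition, not part of the original module): the
# aggregate produced by summary() is a struct column, so individual metrics can be
# selected by field name; the "features" column and the requested metrics are
# assumptions for the example.
def _example_summary_fields(df, featuresCol="features"):
    """Sketch only: request mean and count, then read them back out of the struct."""
    summarizer = Summarizer.metrics("mean", "count")
    summary_df = df.select(summarizer.summary(df[featuresCol]).alias("s"))
    return summary_df.select("s.mean", "s.count").first()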
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.stat
from pyspark.sql import SparkSession
try:
        # Numpy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.stat.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder \
.master("local[2]") \
.appName("ml.stat tests") \
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
| apache-2.0 | -2,931,887,275,862,958,600 | 38.080569 | 100 | 0.601807 | false |
linrio/WhetherOrNotMe | testme.py | 1 | 2028 | # -*- coding utf-8 -*-
import cv2
import os
import numpy as np
from sklearn.model_selection import train_test_split
import random
import tensorflow as tf
def read_data(img_path, image_h = 64, image_w = 64):
image_data = []
label_data = []
image = cv2.imread(img_path)
#cv2.namedWindow("Image")
#cv2.imshow("Image",image)
#cv2.waitKey(0)
    # Pad the shorter dimension with black borders so the image becomes square
    # before resizing; this keeps the aspect ratio of the original content.
    h, w, _ = image.shape
    longest_edge = max(h, w)
    top, bottom, left, right = (0, 0, 0, 0)
    dh, dw = (0, 0)
    if h < longest_edge:
        dh = longest_edge - h
        top = dh // 2
        bottom = dh - top
    elif w < longest_edge:
        dw = longest_edge - w
        left = dw // 2
        right = dw - left
    else:
        pass
    image_pad = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    # Scale the padded square image down to the target size (64x64 by default).
    image = cv2.resize(image_pad, (image_h, image_w))
image_data.append(image)
label_data.append(img_path)
image_data = np.array(image_data)
train_x, test_x, train_y, test_y = train_test_split(image_data, label_data, test_size=0.05,
random_state=random.randint(0, 100))
X = tf.placeholder(tf.float32,[None, 64, 64, 3])
Y = tf.placeholder(tf.float32, [None, 2])
return Y
#img_path = '4833.jpg'
#print(read_data(img_path))
x_data = np.float32(np.random.rand(2,100))
y_data = np.dot([0.100, 0.200], x_data) + 0.300
b = tf.Variable(tf.zeros([1]), name='B')
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0), name='W')
y = tf.add(tf.matmul(W, x_data, name='MatMul'), b ,name='add')
loss = tf.reduce_mean(tf.square(tf.subtract(y, y_data, name='Sub'), name='Square'), name='ReduceMean')
optimizer = tf.train.GradientDescentOptimizer(0.001, name='Optimizer')
train = optimizer.minimize(loss, name='minimize')
summaries = [tf.summary.histogram('W',W), tf.summary.histogram('b', b), tf.summary.scalar('loss', loss)]
summary_op = tf.summary.merge(summaries)
print(summary_op) | apache-2.0 | 568,228,649,818,764,700 | 33 | 105 | 0.594675 | false |
fedora-conary/conary | conary_test/cvctest/buildtest/buildtest.py | 1 | 64252 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import bz2
import os
import sys
import socket, stat
from testrunner import testhelp
from conary import versions
from conary.lib import util
from conary_test import rephelp
from conary.build import errors
from conary.build import policy, packagepolicy
# Shared helper used by many tests below: stands in for a policy's saveArgSet(),
# collecting every argument into the set `s` and optionally calling the real method.
def mockedSaveArgSet(self, real, s, *args):
for arg in args:
if type(arg) in (list, tuple, set):
s.update(arg)
else:
s.add(arg)
if real:
real(self, *args)
# Shared helper used by many tests below: stands in for a policy's updateArgs(),
# collecting every argument into the set `s`.
def mockedUpdateArgs(self, s, *args):
for arg in args:
if type(arg) in (list, tuple, set):
s.update(arg)
else:
s.add(arg)
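# Editorial note (not part of the original tests): the two helpers above are
# normally wired in through rephelp's mock() so that whatever a mocked policy
# method receives is captured in a plain set the test can assert against, e.g.:
#
#     reported = set()
#     self.mock(packagepolicy.reportMissingBuildRequires, 'updateArgs',
#               lambda *args: mockedUpdateArgs(args[0], reported, *args[1:]))
#     # ... cook the recipe, then:
#     self.assertEquals(reported, set(['foo:runtime']))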
class XMLCatalogTest(rephelp.RepositoryHelper):
def testXMLCatalogTest1(self):
# This recipe should fail because it lacks a buildRequires
# entry to libxml2:runtime.
recipestr1 = """
class TestXMLCatalog(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.XMLCatalogEntry('sgml-entities.xml',
'delegatePublic',
'-//OASIS//DocBk ',
'%(datadir)s/sgml/docbook/master.xml')
"""
self.assertRaises(policy.PolicyError, self.buildRecipe,
recipestr1, "TestXMLCatalog")
recipestr2 = """
class TestXMLCatalog2(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
buildRequires = [ 'libxml2:runtime' ]
def setup(r):
r.XMLCatalogEntry('a.xml',
'delegatePublic',
'-//OASIS//DocBk ',
'%(datadir)s/sgml/docbook/master.xml')
r.XMLCatalogEntry('a.xml',
'delegatePublic',
'-//OASIS//DocBook ',
'%(datadir)s/sgml/docbook/master2.xml')
r.XMLCatalogEntry('b.xml',
'delegatePublic',
'-//OASIS//SGML Entities ',
'%(datadir)s/sgml/docbook/master3.xml',
catalogDir='/ue')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr2, "TestXMLCatalog2", ignoreDeps=True)
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
# There's not really a good way to test to see if this file
# was created correctly, so we just test to see if an
# expected number of lines are present. Note: this line
# count could change with future versions of libxml2.
F = file(util.joinPaths(self.workDir, '/etc/xml/a.xml'))
lines = F.readlines()
assert(len(lines) == 6)
F.close()
assert(os.lstat(util.joinPaths(self.workDir, 'ue/b.xml')))
class SGMLCatalogTest(rephelp.RepositoryHelper):
def testSGMLCatalogTest1(self):
# This recipe should fail because it lacks a buildRequires
# entry to libxml2:runtime.
recipestr1 = """
class TestSGMLCatalog(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.SGMLCatalogEntry('sgml-entities.cat', 'foot.cat')
"""
self.assertRaises(policy.PolicyError, self.buildRecipe,
recipestr1, "TestSGMLCatalog")
recipestr2 = """
class TestSGMLCatalog(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
buildRequires = ['libxml2:runtime']
def setup(r):
r.SGMLCatalogEntry('sgml-entities.cat', 'foo.cat')
r.SGMLCatalogEntry('sgml-entities.cat', 'bar.cat')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr2, "TestSGMLCatalog", ignoreDeps=True)
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
F = file(util.joinPaths(self.workDir, '/etc/sgml/sgml-entities.cat'))
lines = F.readlines()
list = ['CATALOG "bar.cat"\n',
'CATALOG "foo.cat"\n']
lines.sort()
assert(lines == list)
F.close()
class PutFilesTest(rephelp.RepositoryHelper):
def testPutFilesTest1(self):
"""
Test _PutFiles
"""
recipestr = """
class TestPutFiles(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.Create('this', 'that')
self.Install('this', 'that', '/foo')
"""
self.assertRaises(errors.CookError, self.buildRecipe,
recipestr, "TestPutFiles")
def testPutFilesTest2(self):
recipestr = """
class TestPutFiles(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.Create('this', 'that')
self.Install('t*', '/foo')
"""
self.assertRaises(TypeError, self.buildRecipe,
recipestr, "TestPutFiles")
def testPutFilesTest3(self):
recipestr = """
class TestPutFiles(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.Create('foo', 'bar', 'baz')
self.MakeDirs('a')
self.Create('a/{foo,bar,baz}')
self.Install('foo', '/')
self.MakeDirs('/b')
self.Install('foo', 'bar', 'baz', '/b/')
self.Install('a', '/c/')
self.Install('a', '/z/')
self.Move('/z', '/d')
self.Install('a', '/z/')
self.Move('/z/*', '/e/')
self.Copy('/e/*', '/f')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr, "TestPutFiles")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
assert(os.lstat(util.joinPaths(self.workDir, 'foo')))
assert(os.lstat(util.joinPaths(self.workDir, 'b/foo')))
assert(os.lstat(util.joinPaths(self.workDir, 'b/bar')))
assert(os.lstat(util.joinPaths(self.workDir, 'b/baz')))
assert(os.lstat(util.joinPaths(self.workDir, 'c/a/foo')))
assert(os.lstat(util.joinPaths(self.workDir, 'c/a/bar')))
assert(os.lstat(util.joinPaths(self.workDir, 'c/a/baz')))
assert(os.lstat(util.joinPaths(self.workDir, 'd/a/foo')))
assert(os.lstat(util.joinPaths(self.workDir, 'd/a/bar')))
assert(os.lstat(util.joinPaths(self.workDir, 'd/a/baz')))
assert(os.lstat(util.joinPaths(self.workDir, 'e/a/foo')))
assert(os.lstat(util.joinPaths(self.workDir, 'e/a/bar')))
assert(os.lstat(util.joinPaths(self.workDir, 'e/a/baz')))
assert(os.lstat(util.joinPaths(self.workDir, 'f/a/foo')))
assert(os.lstat(util.joinPaths(self.workDir, 'f/a/bar')))
assert(os.lstat(util.joinPaths(self.workDir, 'f/a/baz')))
self.assertRaises(OSError, os.lstat,
util.joinPaths(self.workDir, 'z/a/foo'))
self.assertRaises(OSError, os.lstat,
util.joinPaths(self.workDir, 'z/a/bar'))
self.assertRaises(OSError, os.lstat,
util.joinPaths(self.workDir, 'z/a/baz'))
def testPutFilesTest4(self):
# test strange names
recipestr = """
class TestPutFiles(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.Create('[b]ar', '*ar', 'bar')
# note -- you can't quote metachars in file names -- \* will not
# escape the * --
# but we can make sure that metacharacters in file names are not
# expanded twice when those files are caught by a glob
self.Install('*', '/strangeNames/')
self.Doc('*')
self.Remove('*')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr, "TestPutFiles")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
assert(os.lstat(util.joinPaths(self.workDir, '/strangeNames/[b]ar')))
assert(os.lstat(util.joinPaths(self.workDir, '/strangeNames/*ar')))
assert(os.lstat(util.joinPaths(self.workDir, '/strangeNames/bar')))
assert(os.lstat(util.joinPaths(self.workDir,
'/usr/share/doc/test-0/[b]ar')))
assert(os.lstat(util.joinPaths(self.workDir,
'/usr/share/doc/test-0/*ar')))
assert(os.lstat(util.joinPaths(self.workDir,
'/usr/share/doc/test-0/bar')))
def testPutFilesTest5(self):
recipestr = """
class TestPutFiles(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('this')
r.Move('this')
"""
self.assertRaises(errors.CookError, self.buildRecipe,
recipestr, "TestPutFiles")
def testUnmatchedPutFilesTest(self):
recipestr = """
class TestPutFiles(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.MakeDirs("/foo")
self.Copy('/foo/*', '/bar', allowNoMatch = True)
"""
self.logFilter.add()
self.buildRecipe(recipestr, "TestPutFiles")
self.logFilter.remove()
self.assertEquals(self.logFilter.records[0],
"warning: Copy: No files matched: '/foo/*'")
def testUnmatchedPutFilesTest2(self):
recipestr = """
class TestPutFiles(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.MakeDirs("/foo")
self.Copy('/foo/*', '/bar')
"""
self.assertRaises(RuntimeError,
self.buildRecipe, recipestr, "TestPutFiles")
def testPutFilesPermissionOverride(self):
# CNY-1634
recipestr = """
class TestPutFiles(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/file', contents = 'old', mode = 0444)
r.Run('echo new > new')
r.Install('new', '/file')
"""
self.buildRecipe(recipestr, "TestPutFiles")
self.updatePkg('test')
self.verifyFile(self.rootDir + '/file', 'new\n')
def testPutSymlinks(self):
recipestr = """
class TestPutFiles(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/dir1/foo1', contents='abc', mode=0755)
# Link to an existing file
r.Symlink('foo1', '/dir1/foo5')
# Dangling symlink
r.Symlink('/dir1/foo1', '/dir2/foo2')
r.Install('/dir2/foo2', '/dir3/', preserveSymlinks=True)
r.Install('/dir2/foo2', '/dir4/foo4', preserveSymlinks=True)
# This should simply copy the file
r.Install('/dir1/foo5', '/dir5/foo5')
# This is just to test modes for Install
r.Install('/dir1/foo1', '/dir1/foo600', mode=0600)
"""
self.reset()
(built, d) = self.buildRecipe(recipestr, "TestPutFiles")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
sl755 = util.joinPaths(self.workDir, '/dir1/foo1')
sl600 = util.joinPaths(self.workDir, '/dir1/foo600')
sl1 = util.joinPaths(self.workDir, '/dir1/foo5')
sl2 = util.joinPaths(self.workDir, '/dir2/foo2')
# Test modes
self.assertFalse(os.path.islink(sl755))
self.assertEqual(0755, os.lstat(sl755)[stat.ST_MODE] & 0755)
self.assertFalse(os.path.islink(sl600))
self.assertEqual(0600, os.lstat(sl600)[stat.ST_MODE] & 0600)
self.assertTrue(os.path.islink(sl1))
self.assertTrue(os.path.islink(sl2))
sl3 = util.joinPaths(self.workDir, '/dir3/foo2')
sl4 = util.joinPaths(self.workDir, '/dir4/foo4')
sl5 = util.joinPaths(self.workDir, '/dir5/foo5')
self.assertTrue(os.path.islink(sl3))
self.assertTrue(os.path.islink(sl4))
self.assertFalse(os.path.islink(sl5))
def testSymlinkGlobbing(self):
recipestr = r"""
class TestSymlinkGlobbing(PackageRecipe):
name = 'test'
version = '1'
clearBuildReqs()
def setup(r):
r.Create("/usr/srcfiles/file1", contents="Contents file1\n")
r.Create("/usr/srcfiles/file2", contents="Contents file2\n")
r.Symlink("/usr/srcfiles/*", "/usr/symlinks/")
"""
self.reset()
(built, d) = self.buildRecipe(recipestr, "TestSymlinkGlobbing")
repos = self.openRepository()
self.updatePkg(self.rootDir, 'test')
symlinksdir = os.path.join(self.rootDir, 'usr/symlinks')
contents = os.listdir(symlinksdir)
contents.sort()
self.assertEqual(contents, ['file1', 'file2'])
for f in contents:
lpath = os.path.join(symlinksdir, f)
fpath = os.readlink(lpath)
if fpath[0] != '/':
# Relative link
fpath = os.path.join(symlinksdir, fpath)
self.assertEqual(open(fpath).read(), "Contents %s\n" % f)
def testUnmatchedSymlinkGlobbing(self):
recipestr = r"""
class TestSymlinkGlobbing(PackageRecipe):
name = 'test'
version = '1'
clearBuildReqs()
def setup(r):
r.MakeDirs("/usr/srcfiles")
r.Symlink("/usr/srcfiles/*", "/usr/symlinks/")
"""
self.assertRaises(RuntimeError, self.buildRecipe, recipestr, "TestSymlinkGlobbing")
def testUnmatchedSymlinkGlobbing2(self):
recipestr = r"""
class TestSymlinkGlobbing(PackageRecipe):
name = 'test'
version = '1'
clearBuildReqs()
def setup(r):
r.MakeDirs("/usr/srcfiles")
r.Symlink("/usr/srcfiles/*", "/usr/symlinks/", allowNoMatch = True)
"""
self.reset()
self.logFilter.add()
(built, d) = self.buildRecipe(recipestr, "TestSymlinkGlobbing")
self.logFilter.remove()
self.assertFalse("warning: Symlink: No files matched: '/usr/srcfiles/*'" \
not in self.logFilter.records)
class ManifestTest(rephelp.RepositoryHelper):
def testManifest(self):
recipestr = r"""
class TestManifest(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
# ensure that ordering is correct by specifying PackageSpec first
r.PackageSpec('asdf', '/asdf/asdf')
r.Create('Makefile', contents='\n'.join((
'install:',
'\tmkdir -p $(DESTDIR)/foo/blah',
'\ttouch $(DESTDIR)/foo/blah/bar',
'\tmkdir -p $(DESTDIR)/asdf',
'\ttouch $(DESTDIR)/asdf/asdf',
)))
r.MakeInstall(package='foo')
r.ExcludeDirectories(exceptions='/foo')
r.Create('/blah/test')
r.Create('/oddcomp', package=':oddcomp')
r.Run('touch %(destdir)s/asdf/fdsa', package='asdf:fdsa')
# test empty manifests
r.Run('true', package=':doesnotexist')
# test skipping missing subdirectories
r.MakeInstall(dir='non-existent', skipMissingDir=True, package='nonesuch')
r.Create('testfileaction')
r.Install('testfileaction', '/var/', package=':testfileaction')
# ensure that the manifests are correct, easiest done
# from within the build
r.Create('asdf.0.manifest', contents='/asdf/fdsa')
r.Create('foo.0.manifest', contents='\n'.join((
'/asdf',
'/asdf/asdf',
'/foo',
'/foo/blah',
'/foo/blah/bar',
)))
r.Create('.0.manifest', contents = '/oddcomp')
r.Create('nonesuch.0.manifest')
r.Run('cmp foo.0.manifest ../_MANIFESTS_/foo.0.manifest')
r.Run('cmp asdf.0.manifest ../_MANIFESTS_/asdf.0.manifest')
#r.Run('cp .0.manifest /tmp/; cp ../_MANIFESTS_/.0.manifest /tmp/acutal.0.manifest')
r.Run('cmp .0.manifest ../_MANIFESTS_/.0.manifest')
r.Run('cmp nonesuch.0.manifest ../_MANIFESTS_/nonesuch.0.manifest')
r.Remove('asdf.0.manifest', 'foo.0.manifest')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr, "TestManifest")
repos = self.openRepository()
for troveName, troveVersion, troveFlavor in built:
troveVersion = versions.VersionFromString(troveVersion)
trove = repos.getTrove(troveName, troveVersion, troveFlavor)
for pathId, path, fileId, version, fileObj in repos.iterFilesInTrove(
trove.getName(), trove.getVersion(), trove.getFlavor(),
withFiles=True):
assert path != '/blah'
if path == '/foo':
assert trove.getName() == 'foo:runtime'
if path == '/oddcomp':
assert trove.getName() == 'test:oddcomp'
if path == '/foo/blah/bar':
assert trove.getName() == 'foo:runtime'
if path == '/blah/test':
assert trove.getName() == 'test:runtime'
if path == '/asdf/asdf':
assert trove.getName() == 'asdf:runtime'
if path == '/asdf/fdsa':
assert trove.getName() == 'asdf:fdsa'
if path == '/var/testfileaction':
assert trove.getName() == 'test:testfileaction'
class LinkTest(rephelp.RepositoryHelper):
"""
Test creating hard links
"""
def testLinkTestBad(self):
recipestr1 = """
class TestLink(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.Link('this/is/broken', '')
"""
self.assertRaises(errors.CookError, self.buildRecipe,
recipestr1, "TestLink")
def testLinkTestGood(self):
recipestr2 = """
class TestLink(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.macros.foo = '/foo'
r.macros.bar = 'bar'
r.Create('%(foo)s',
contents='ABCDEFGABCDEFGABCDEFGABCDEFG%(destdir)s/')
r.Link('%(bar)s', 'blah', '%(foo)s')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr2, "TestLink")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
a = os.lstat(util.joinPaths(self.workDir, 'foo'))
b = os.lstat(util.joinPaths(self.workDir, 'bar'))
c = os.lstat(util.joinPaths(self.workDir, 'blah'))
assert(a[stat.ST_INO] == b[stat.ST_INO])
assert(b[stat.ST_INO] == c[stat.ST_INO])
def testLinkDir(self):
recipe1 = """
class FooRecipe(PackageRecipe):
name = 'foo'
version = '1'
clearBuildReqs()
def setup(r):
r.MakeDirs('/var/foo', '/var/bar/')
r.Create('/var/foo/testme', contents='arbitrary data')
r.Link('/var/foo/tested', '/var/foo/testme')
"""
(built, d) = self.buildRecipe(recipe1, "FooRecipe")
self.updatePkg(built[0][0])
assert(os.lstat(self.rootDir + '/var/foo/testme').st_ino ==
os.lstat(self.rootDir + '/var/foo/tested').st_ino)
class MakeDirsTest(rephelp.RepositoryHelper):
def testMakeDirsTest1(self):
"""
Test creating directories
"""
recipestr1 = """
class TestMakeDirs(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.MakeDirs('foo')
self.Run('ls foo')
"""
(built, d) = self.buildRecipe(recipestr1, "TestMakeDirs")
recipestr2 = """
class TestMakeDirs(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.MakeDirs('/bar/blah')
self.ExcludeDirectories(exceptions='/bar/blah')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr2, "TestMakeDirs")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
assert(stat.S_ISDIR(
os.lstat(util.joinPaths(self.workDir, '/bar/blah'))[stat.ST_MODE]))
class SugidTest(rephelp.RepositoryHelper):
def testSugidTest1(self):
"""
        Test to make sure that setuid/setgid bits get restored.
Warning: this won't catch variances when running as root!
"""
recipestr1 = """
class TestSugid(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.Create('%(essentialbindir)s/a', mode=06755)
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestSugid")
self.mimicRoot()
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
self.realRoot()
a = os.lstat(util.joinPaths(self.workDir, 'bin/a'))
assert (a.st_mode & 07777 == 06755)
class CreateTest(rephelp.RepositoryHelper):
def testCreateTest1(self):
"""
Test creating files directly
"""
recipestr1 = """
class TestCreate(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.Create(('/a', '/b'))
self.Create('/c', '/d', contents='ABCDEFGABCDEFGABCDEFGABCDEFG')
self.Create('/e', contents='%(essentialbindir)s')
self.Create('/f', contents='%(essentialbindir)s', macros=False)
self.Create('%(essentialbindir)s/{g,h}', mode=0755)
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestCreate")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
a = os.lstat(util.joinPaths(self.workDir, 'a'))
b = os.lstat(util.joinPaths(self.workDir, 'b'))
F = file(util.joinPaths(self.workDir, 'c'))
c = F.read()
F.close
F = file(util.joinPaths(self.workDir, 'd'))
d = F.read()
F.close
F = file(util.joinPaths(self.workDir, 'e'))
e = F.read()
F.close
F = file(util.joinPaths(self.workDir, 'e'))
e = F.read()
F.close
F = file(util.joinPaths(self.workDir, 'f'))
f = F.read()
F.close
g = os.lstat(util.joinPaths(self.workDir, '/bin/g'))
h = os.lstat(util.joinPaths(self.workDir, '/bin/g'))
assert (a.st_size == 0)
assert (b.st_size == 0)
assert (c == 'ABCDEFGABCDEFGABCDEFGABCDEFG\n')
assert (d == 'ABCDEFGABCDEFGABCDEFGABCDEFG\n')
assert (e == '/bin\n')
assert (f == '%(essentialbindir)s\n')
assert (g.st_mode & 0777 == 0755)
assert (h.st_mode & 0777 == 0755)
class SymlinkTest(rephelp.RepositoryHelper):
def testSymlinkTest1(self):
"""
Test creating files directly
"""
recipestr1 = """
class TestSymlink(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/a')
r.Symlink('/one/argument')
"""
self.assertRaises(errors.CookError, self.buildRecipe,
recipestr1, "TestSymlink")
def testSymlinkTest2(self):
recipestr2 = """
class TestSymlink(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Symlink('/asdf/foo', '/bar/blah')
r.DanglingSymlinks(exceptions='.*')
"""
self.buildRecipe(recipestr2, "TestSymlink")
class DocTest(rephelp.RepositoryHelper):
def exists(self, file):
return os.path.exists(self.workDir + file)
def testDocs(self):
recipestr1 = """
class TestDocs(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('README')
r.Doc('README')
r.Create('docs/README.too')
r.Doc('docs/')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestDocs")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
docdir = '/usr/share/doc/test-0/'
for file in 'README', 'docs/README.too':
assert(self.exists(docdir + file))
class ConfigureTest(rephelp.RepositoryHelper):
def testConfigure(self):
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh
exit 0
''')
r.Configure()
r.Create('/asdf/foo')
"""
(built, d) = self.buildRecipe(recipestr1, "TestConfigure")
# make sure that the package doesn't mention the bootstrap
# bootstrap flavor
assert(built[0][2].isEmpty())
def testConfigureSubDirMissingOK(self):
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh
touch mustNotExist
exit 0
''')
r.Configure(subDir='missing', skipMissingSubDir=True)
r.Run('test -f mustNotExist && exit 1 ; exit 0')
r.Create('/asdf/foo')
"""
(built, d) = self.buildRecipe(recipestr1, "TestConfigure")
def testConfigureSubDirMissingBad(self):
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh
exit 0
''')
r.Configure(subDir='missing')
r.Create('/asdf/foo')
"""
self.assertRaises(RuntimeError, self.buildRecipe,
recipestr1, "TestConfigure")
def testConfigureLocal(self):
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh -x
echo "$CONFIG_SITE" > $1
''')
r.MakeDirs('/make', '/conf')
r.ManualConfigure('%(destdir)s/conf/target')
r.ManualConfigure('%(destdir)s/conf/local', local=True)
r.Make('%(destdir)s/make/target', makeName='./configure')
r.Make('%(destdir)s/make/local', local=True, makeName='./configure')
# run again to make sure any state changed by Make was restored.
r.ManualConfigure('%(destdir)s/conf/target')
r.ManualConfigure('%(destdir)s/conf/local', local=True)
"""
self.overrideBuildFlavor('is:x86 target: x86_64')
(built, d) = self.buildRecipe(recipestr1, "TestConfigure")
self.updatePkg('test[is:x86 target:x86_64]')
for dir in ('%s/make/', '%s/conf'):
dir = dir % self.cfg.root
self.verifyFile('%s/local' % dir,
' '.join([ '%s/%s' % (self.cfg.siteConfigPath[0], x)
for x in ('x86', 'linux')]) + '\n')
self.verifyFile('%s/target' % dir,
' '.join([ '%s/%s' % (self.cfg.siteConfigPath[0], x)
for x in ('x86_64', 'linux')]) + '\n')
def testConfigureMissingReq(self):
recipestr = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh
echo "$0: line 2000: foo: command not found"
# exit 1
''')
r.ManualConfigure()
r.Create('/opt/foo')
"""
self.logFilter.add()
self.assertRaises(RuntimeError, self.buildRecipe,
recipestr.replace('# exit', 'exit'),
"TestConfigure", logBuild = True)
self.logFilter.remove()
self.logFilter.regexpCompare([
'error: .*',
'warning: ./configure: line 2000: foo: command not found',
'warning: Failed to find possible build requirement for path "foo"',
])
# now repeat with foo in the repository but not installed
self.addComponent('foo:runtime', '1', fileContents = [
('/usr/bin/foo', rephelp.RegularFile(contents="", perms=0755)),])
repos = self.openRepository()
self.logFilter.add()
reportedBuildReqs = set()
self.mock(packagepolicy.reportMissingBuildRequires, 'updateArgs',
lambda *args:
mockedSaveArgSet(args[0], None, reportedBuildReqs, *args[1:]))
(built, d) = self.buildRecipe(recipestr, "TestConfigure",
logBuild = True, repos=repos)
self.logFilter.remove()
self.logFilter.compare([
'warning: ./configure: line 2000: foo: command not found',
"warning: Some missing buildRequires ['foo:runtime']",
])
self.assertEquals(reportedBuildReqs, set(['foo:runtime']))
self.unmock()
# now test with absolute path in error message
self.logFilter.add()
(built, d) = self.buildRecipe(recipestr.replace(
'foo: command not found', '/usr/bin/foo: command not found'),
"TestConfigure",
logBuild = True)
self.logFilter.remove()
self.logFilter.regexpCompare([
'warning: .*: /usr/bin/foo: command not found',
r"warning: Some missing buildRequires \['foo:runtime'\]",
])
# test that the logfile got the warning message
client = self.getConaryClient()
repos = client.getRepos()
nvf = [x for x in built if x[0] == 'test:debuginfo'][0]
nvf = repos.findTrove(self.cfg.buildLabel, nvf)
fileDict = client.getFilesFromTrove(*nvf[0])
fileObj = fileDict['/usr/src/debug/buildlogs/test-0-log.bz2']
b = bz2.BZ2Decompressor()
buildLog = b.decompress(fileObj.read())
self.assertFalse( \
"warning: Suggested buildRequires additions: ['foo:runtime']" \
not in buildLog)
# finally repeat with foo installed, not just in repository
self.updatePkg('foo:runtime')
self.logFilter.add()
reportedBuildReqs = set()
self.mock(packagepolicy.reportMissingBuildRequires, 'updateArgs',
lambda *args:
mockedSaveArgSet(args[0], None, reportedBuildReqs, *args[1:]))
(built, d) = self.buildRecipe(recipestr, "TestConfigure",
logBuild = True)
self.logFilter.remove()
self.logFilter.compare([
'warning: ./configure: line 2000: foo: command not found',
"warning: Some missing buildRequires ['foo:runtime']",
])
self.assertEquals(reportedBuildReqs, set(['foo:runtime']))
def testConfigureMissingReq2(self):
"""
test that regexp matching is not fooled by dir argument
"""
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('configure', mode=0755, contents='''#!/bin/sh
echo "/random/path/configure: line 2000: foo: command not found"
''')
r.ManualConfigure('')
r.Create('/opt/foo')
"""
self.logFilter.add()
(built, d) = self.buildRecipe(recipestr1, "TestConfigure",
logBuild = True)
self.logFilter.remove()
self.logFilter.compare([
'warning: /random/path/configure: line 2000: foo: '
'command not found',
'warning: Failed to find possible build requirement for path "foo"',
])
class CMakeTest(rephelp.RepositoryHelper):
def testCMake(self):
if not util.checkPath('cmake'):
raise testhelp.SkipTestException('cmake not installed')
recipestr1 = """
class TestCMake(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('CMakeLists.txt', contents = '''\
PROJECT(floo)
ADD_EXECUTABLE(floo floo.c)
''')
r.addSource('floo.c', contents = '''
int main()
{
return 0;
}
''')
r.CMake()
r.Make()
r.Copy('floo', '/usr/bin/floo')
"""
(built, d) = self.buildRecipe(recipestr1, "TestCMake")
def testCMakeSubDir(self):
if not util.checkPath('cmake'):
raise testhelp.SkipTestException('cmake not installed')
# Same as previous test, but run in a subdir
recipestr1 = """
class TestCMake(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('floo/CMakeLists.txt', contents = '''\
PROJECT(floo)
''')
r.CMake(dir = 'floo')
r.Copy('floo/Makefile', '/usr/share/floo/')
"""
(built, d) = self.buildRecipe(recipestr1, "TestCMake")
class RemoveTest(rephelp.RepositoryHelper):
def testRemove(self):
recipestr1 = """
class TestRemove(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.MakeDirs('a/b')
r.Create('a/file')
r.Install('a', '/a')
r.Remove('/a/*')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestRemove")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
def testRemoveRecursive(self):
# Test for CNY-69
recipestr1 = """
class TestRemove(PackageRecipe):
name = 'testr'
version = '0.1'
clearBuildReqs()
def setup(r):
r.Create("%(datadir)s/%(name)s/dir1/file1", contents="file1")
r.Create("%(datadir)s/%(name)s/dir1/dir2/file2", contents="file2")
r.Create("%(datadir)s/%(name)s/dir1/dir2/dir3/file3", contents="file3")
r.Create("%(datadir)s/%(name)s/dir1/dir2/dir5/file4", contents="file4")
r.Remove("%(datadir)s/%(name)s/dir1/dir2", recursive=True)
"""
repos = self.openRepository()
oldVal = self.cfg.cleanAfterCook
self.cfg.cleanAfterCook = False
try:
(build, d) = self.buildRecipe(recipestr1, "TestRemove")
finally:
self.cfg.cleanAfterCook = oldVal
dr = os.path.join(self.workDir, '../build/testr/_ROOT_',
'usr/share/testr')
self.assertEqual(os.listdir(dr), ["dir1"])
def testUnmatchedRemove(self):
recipestr = """
class TestRemove(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.MakeDirs('/a')
"""
self.reset()
err = self.assertRaises(RuntimeError,
self.buildRecipe, recipestr + "r.Remove(r.glob('/a/*'))\n",
"TestRemove")
assert(str(err) == "Remove: No files matched: Glob('/a/*')")
err = self.assertRaises(RuntimeError,
self.buildRecipe, recipestr + "r.Remove('/a/*')\n",
"TestRemove")
assert(str(err) == "Remove: No files matched: '/a/*'")
err = self.assertRaises(RuntimeError,
self.buildRecipe,
recipestr + "r.Remove(r.glob('/a/*'), '/b/*')\n",
"TestRemove")
assert(str(err) == "Remove: No files matched: (Glob('/a/*'), '/b/*')")
def testUnmatchedRemove2(self):
recipestr = """
class TestRemove(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.MakeDirs('/a')
r.Remove('/a/*', allowNoMatch = True)
"""
self.reset()
self.logFilter.add()
(built, d) = self.buildRecipe(recipestr, "TestRemove")
self.logFilter.remove()
self.assertEquals(self.logFilter.records[0],
"warning: Remove: No files matched: '/a/*'")
class BuildLabelTest(rephelp.RepositoryHelper):
def testBuildLabel(self):
recipestr1 = """
class TestBuildLabel(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
assert(r.macros.buildlabel == 'localhost@rpl:linux')
assert(r.macros.buildbranch == '/localhost@rpl:linux')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestBuildLabel")
class ConsoleHelperTest(rephelp.RepositoryHelper):
def testConsoleHelper(self):
recipestr1 = """
class TestConsoleHelper(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
# test default values
r.Create('%(sbindir)s/foo', mode=0755)
r.ConsoleHelper('%(bindir)s/foo', '%(sbindir)s/foo')
# test non-default values
r.Create('%(sbindir)s/bar', mode=0755)
r.ConsoleHelper('%(bindir)s/bar', '%(sbindir)s/bar',
consoleuser=True, timestamp=True, targetuser='<user>',
session=False, fallback=True, noxoption='--haha',
otherlines=['ONE=one', 'TWO=two'])
r.Create('%(sbindir)s/blah', mode=0755)
r.Create('blah.pam', contents='TESTING')
r.ConsoleHelper('%(bindir)s/blah', '%(sbindir)s/blah',
pamfile='blah.pam')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestConsoleHelper")
for p in built:
self.updatePkg(self.workDir, p[0], p[1], depCheck=False)
F = file(util.joinPaths(self.workDir, '/etc/security/console.apps/foo'))
fooC = F.readlines()
F.close
assert(len(fooC) == 2)
F = file(util.joinPaths(self.workDir, '/etc/security/console.apps/bar'))
barC = F.readlines()
F.close
assert(len(barC) == 7)
F = file(util.joinPaths(self.workDir, '/etc/pam.d/foo'))
fooP = F.readlines()
F.close
assert(len(fooP) == 4)
F = file(util.joinPaths(self.workDir, '/etc/pam.d/bar'))
barP = F.readlines()
F.close
assert(len(barP) == 8)
F = file(util.joinPaths(self.workDir, '/etc/pam.d/blah'))
blahP = F.readlines()
F.close
assert(len(blahP) == 1)
class ReplaceTest(rephelp.RepositoryHelper):
def testReplace(self):
recipestr = r"""
class TestReplace(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('bar', contents='abcdef\nabcdef\n')
r.Create('/asdf/foo', contents='abcdef\nabcdef\n')
r.Create('/asdf/multiReplace', contents='abcdef\nabcdef\n')
r.Replace('(a.*f)', r'\1g', 'bar', '/asdf/foo', lines=1)
r.Install('bar', '/asdf/bar')
r.addSource('bar2', contents='someotherstuff')
r.Replace('notmatching', '', 'bar2', allowNoChange=True)
r.Replace(('a', 'b'), ('b', 'c'), ('c', 'd'), '/asdf/multiReplace')
r.Replace(('a', 'b'), '/asdf', allowNoChange=True)
# now test regexp line limiter
r.Create('/bar3', contents='several1\nseveral2\nseveral3\n')
r.Replace('several', 'many', '/bar3', lines='1$')
"""
(built, d) = self.buildRecipe(recipestr, "TestReplace")
for p in built:
self.updatePkg(self.workDir, p[0], p[1], depCheck=False)
self.verifyFile(self.workDir + '/asdf/foo', 'abcdefg\nabcdef\n')
self.verifyFile(self.workDir + '/asdf/bar', 'abcdefg\nabcdef\n')
self.verifyFile(self.workDir + '/asdf/multiReplace', 'ddddef\nddddef\n')
self.verifyFile(self.workDir + '/bar3', 'many1\nseveral2\nseveral3\n')
def testFailReplaceInit(self):
recipestr = r"""
class TestReplace(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
# test all the init-time failures in the recipe
try:
r.Replace('a')
except TypeError, err:
if err.args[0].endswith('not enough arguments'):
r.Create('/asdf/passed1')
try:
r.Replace('a', 'b')
except TypeError, err:
if err.args[0].endswith('not enough arguments: no file glob supplied'):
r.Create('/asdf/passed2')
try:
r.Replace('a', 'b', 'foo', lines=(0,30))
except RuntimeError, err:
if err.args[0].endswith('Replace() line indices start at 1, like sed'):
r.Create('/asdf/passed3')
try:
r.Replace('a', '\1', 'foo')
except TypeError, msg:
if msg.args[0].find(
'Illegal octal character in substitution string') != -1:
r.Create('/asdf/passed4')
try:
r.Replace('aa', 'bb', '')
except TypeError, msg:
if msg.args[0].find(
'empty file path specified to Replace') != -1:
r.Create('/asdf/passed5')
"""
(built, d) = self.buildRecipe(recipestr, "TestReplace")
for p in built:
self.updatePkg(self.workDir, p[0], p[1], depCheck=False)
for i in range(1,6):
assert(os.path.exists(self.workDir + '/asdf/passed%d' % i))
def testFailReplace(self):
emptyRecipe = r"""
class TestReplace(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/asdf/foo', contents='abc\n')
"""
recipe2 = emptyRecipe + 'r.Replace("a", "b", "nonexistant")'
self.assertRaises(RuntimeError, self.buildRecipe,
recipe2, "TestReplace")
recipe3 = emptyRecipe + 'r.Replace(("nosuchpattern", "b"), "/asdf/foo")'
self.assertRaises(RuntimeError, self.buildRecipe,
recipe3, "TestReplace")
def testReplaceNonRegular(self):
recipe = r"""
class TestReplace(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/bar', contents='abc\n')
r.Symlink('bar', '/foo')
r.Replace(("foo", "bar"), "/foo")
r.Create('build', contents='fdsa\n')
r.Symlink('build', 'link')
r.Replace('fdsa', 'f', 'link')
r.Symlink('/absolute', '/dangling')
r.Create('/absolute', contents='asd\n')
r.Replace('asd', 'qwe', '/dangling')
"""
self.logCheck(self.buildRecipe, (recipe, "TestReplace"), [
'warning: /foo is not a regular file, not applying Replace',
'warning: link is not a regular file, not applying Replace',
'warning: /dangling is not a regular file, not applying Replace'
])
class DirSyntaxTest(rephelp.RepositoryHelper):
def testDirSyntax(self):
recipestr1 = r"""
class Foo(PackageRecipe):
name = 'foo'
version = '1.0'
clearBuildReqs()
def setup(r):
r.Create('/asdf/foo', mode=0700)
r.Create('asdf/foo', mode=0777)
r.Run('ls foo', dir='/asdf')
r.Run('ls foo', dir='asdf')
r.Move('/asdf/foo', 'asdf/bar')
r.Create('/asdf/foo', mode=0700, component=':comp1')
r.Run('ls bar', dir='asdf')
r.Copy('asdf/bar', '/asdf/bar')
r.Run('ls bar', dir='/asdf')
r.Copy('/asdf/bar', 'asdf/bar2')
r.Run('ls bar2', dir='asdf')
r.Copy('/asdf/bar', '/asdf/bar2', component=':comp2')
r.Remove('asdf/bar')
r.Remove('/asdf/bar')
r.Move('asdf/foo', '/asdf/bar')
r.Remove('/asdf/bar')
r.MakeDirs('/asdf/destsubdir', 'asdf/buildsubdir')
r.MakeDirs('/asdf/destsubdir2', component=':comp3', mode=0700)
r.Run('ls destsubdir', dir='/asdf')
r.Run('ls buildsubdir', dir='asdf')
r.Symlink('destsubdir', '/asdf/symlink', component=':comp4')
r.Run('ls symlink', dir='/asdf')
r.Symlink('buildsubdir', 'asdf/symlink')
r.Run('ls symlink', dir='asdf')
r.Symlink('%(builddir)s/asdf/bar2', 'asdf/buildsubdir')
r.Run('ls buildsubdir/bar2', dir='asdf')
r.SetModes('asdf/bar2', '/asdf/destsubdir', 0700)
r.Run('''
mkdir %(destdir)s/builddir
cat > %(destdir)s/builddir/hello.c <<'EOF'
#include <stdio.h>
int main(void) {
return printf("Hello, world.\\\\n");
}
EOF
''')
r.Make('hello', preMake='LDFLAGS="-static"', subDir='/builddir')
r.Run('''
mkdir builddir
cat > builddir/hello.c <<'EOF'
#include <stdio.h>
int main(void) {
return printf("Hello, world.\\\\n");
}
EOF
''')
r.Make('hello', preMake='LDFLAGS="-static"', subDir='builddir')
r.Create('/builddir/configure', mode=0755, contents='''\
#!/bin/sh
echo hello > config.out
''')
r.Configure(subDir='/builddir')
r.Run('ls config.out', dir='/builddir')
r.Create('builddir/configure', mode=0755, contents='''\
#!/bin/sh
echo hello > config.out
''')
r.Configure(subDir='builddir')
r.Run('ls config.out', dir='builddir')
r.Install('builddir/config.out', 'builddir/config.out2')
r.Run('ls config.out2', dir='builddir')
r.Install('builddir/config.out', '/builddir/config.out2', component='comp5')
r.Run('ls config.out2', dir='/builddir')
r.Install('/builddir/config.out', '/builddir/config.out3', component='package2:comp1')
r.Run('ls config.out3', dir='/builddir')
r.Install('/builddir/config.out', 'builddir/config.out3')
r.Run('ls config.out3', dir='builddir')
r.MakeDirs('/usr/share/%%bar', mode=0700)
r.Run('ls %(destdir)s/usr/share/%%bar')
r.Doc('/builddir/config.*', component='package2:comp2')
r.Run('ls config.out', dir='%(thisdocdir)s')
r.Doc('builddir/config.*', component='package2:comp2', subdir='subdir')
r.Run('ls config.out', dir='%(thisdocdir)s/subdir')"""
(built, d) = self.buildRecipe(recipestr1, "Foo")
compNames = [x[0] for x in built]
for i in range(1, 6):
assert('foo:comp%d' % i in compNames)
for i in range(1, 3):
assert('package2:comp%d' % i in compNames)
def testFailDirSyntax(self):
# set component on build dir file
# set bad mode on build dir file
emptyRecipe = r"""
class TestDirSyntax(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/asdf/foo', contents='abc\n')
r.Create('asdf/foo', contents='abc\n')
"""
recipe2 = emptyRecipe + 'r.Create("asdf/bar", mode=02755)'
self.assertRaises(RuntimeError, self.buildRecipe,
recipe2, "TestDirSyntax")
recipe2 = emptyRecipe + 'r.Create("asdf/bar", component=":lib")'
self.assertRaises(RuntimeError, self.buildRecipe,
recipe2, "TestDirSyntax")
recipe2 = emptyRecipe + 'r.Link("foo", "asdf/bar")'
self.assertRaises(TypeError, self.buildRecipe,
recipe2, "TestDirSyntax")
class DesktopfileTest(rephelp.RepositoryHelper):
def testDesktopfileTest1(self):
"""
Test Desktopfile
"""
recipestr1 = """
class TestDesktopfile(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
#buildRequires = [ 'desktop-file-utils:runtime' ]
def setup(r):
r.Create('foo.desktop', contents='''\
[Desktop Entry]
Encoding=UTF-8
Name=Foo
Comment=Foo Bar
Exec=foo
Icon=foo
Terminal=false
Type=Application
Categories=Presentation;Java;
''')
r.Desktopfile('foo.desktop')
r.Create('%(datadir)s/foo/foo.png')
"""
self.addComponent("desktop-file-utils:runtime", "1", fileContents = [
("/usr/bin/desktop-file-validate", "somecontent"),
("/usr/bin/desktop-file-install", "someothercontent")
])
reportedBuildReqs = set()
self.mock(packagepolicy.reportMissingBuildRequires, 'updateArgs',
lambda *args:
mockedUpdateArgs(args[0], reportedBuildReqs, *args[1:]))
self.updatePkg(self.rootDir, ["desktop-file-utils:runtime"])
self.build(recipestr1, "TestDesktopfile")
self.assertEquals(reportedBuildReqs, set(('desktop-file-utils:runtime',)))
reportedBuildReqs = set()
recipestr2 = recipestr1.replace('#buildR', 'buildR')
self.build(recipestr2, "TestDesktopfile")
self.assertEquals(reportedBuildReqs, set())
class XInetdServiceTest(rephelp.RepositoryHelper):
def testXInetdServiceTest1(self):
"""
Test XInetdService
"""
recipestr1 = """
class TestXInetdService(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.XInetdService('foo', 'the foo service')
"""
self.assertRaises(errors.CookError, self.buildRecipe,
recipestr1, "TestXInetdService")
recipestr2 = """
class TestXInetdService(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.XInetdService('foo',
'the foo service with a really long description to see'
' how well textwrap does at formatting the description'
' string into a chkconfig-compatible description string.',
server='/usr/bin/foo', port='1079', protocol='tcp',
otherlines=['foo=bar', 'baz=blah'])
"""
self.reset()
(built, d) = self.buildRecipe(recipestr2, "TestXInetdService",
ignoreDeps=True)
for p in built:
self.updatePkg(self.workDir, p[0], p[1], depCheck=False)
f = file(self.workDir + '/etc/xinetd.d/foo')
l = [ x[:-1] for x in f.readlines() ]
e = ['# default: False',
'# description: the foo service with a really long description to see \\',
'# how well textwrap does at formatting the description \\',
'# string into a chkconfig-compatible description string.',
'',
'service foo',
'{',
'\tprotocol\t= tcp',
'\tport\t\t= 1079',
'\tserver\t\t= /usr/bin/foo',
'\twait\t\t= no',
'\tdisable\t\t= yes',
'\tfoo=bar',
'\tbaz=blah',
'}'
]
for found, expected in zip(l, e):
assert(found == expected)
class MakeDevicesTest(rephelp.RepositoryHelper):
def testMakeDevicesTest1(self):
recipestr1 = """
class TestMakeDevices(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.MakeDevices('/dev/foo', 'c', 1, 2, 'root', 'root')
# add a dev to a libdir to make sure it works (CNY-2692)
r.MakeDevices('/lib/dev/foo', 'c', 1, 2, 'root', 'root')
r.MakeDevices('/lib64/dev/foo', 'c', 1, 2, 'root', 'root')
"""
built, d = self.buildRecipe(recipestr1, "TestMakeDevices")
class RunTest(rephelp.RepositoryHelper):
def testRunTest1(self):
recipestr1 = """
class TestRun(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/asdf/ajkl', contents='bar')
r.Run('if cat /asdf/ajkl ; then : else exit 1 ; fi')
r.Run('cat /asdf/ajkl | grep bar', filewrap=True)
r.Run('cat /asdf/ajkl | grep bar', wrapdir='/asdf')
r.Run('if cat /asdf/ajkl ; then : else exit 1 ; fi', wrapdir='/fdsa')
"""
built, d = self.buildRecipe(recipestr1, "TestRun")
def testRunReadFromStdin(self):
recipestr = r"""
class TestPackage(PackageRecipe):
name = 'test'
version = '1'
clearBuildReqs()
def setup(r):
r.Create("/usr/foo", contents="some text\n")
r.Run("cat")
"""
os.chdir(self.workDir)
self.newpkg("test")
os.chdir('test')
self.writeFile('test.recipe', recipestr)
self.addfile('test.recipe')
self.commit()
self.cookFromRepository('test', logBuild=True)
# Same deal, without logging - should still work
self.cookFromRepository('test', logBuild=False)
class PythonSetupTest(rephelp.RepositoryHelper):
def testPythonSetupS0(self):
recipestr = r"""
class Test(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('setup.py', contents='\n'.join((
'#import setuptools',
'file("%(destdir)s/setuptools", "w").close()',
'')))
r.PythonSetup()
"""
# missing buildreq, but test that it runs anyway
trv = self.build(recipestr, "Test")
self.verifyPackageFileList(trv, ['/setuptools'])
def testPythonSetupS1(self):
recipestr = r"""
class Test(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('setup.py', contents='\n'.join((
'#from setuptools import setup',
'file("%(destdir)s/setuptools", "w").close()',
'')))
r.PythonSetup()
"""
# missing buildreq, but test that it runs anyway
trv = self.build(recipestr, "Test")
self.verifyPackageFileList(trv, ['/setuptools'])
def testPythonSetupD(self):
try:
__import__('setuptools')
except ImportError:
raise testhelp.SkipTestException('Missing python-setuptools package')
recipestr = r"""
class Test(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('blah/setupfoo.py', contents='\n'.join((
'#import disttools',
'file("%(destdir)s/disttools", "w").close()',
'')))
r.PythonSetup(setupName='setupfoo.py', dir='blah', action='whatever',
rootDir='')
"""
# missing buildreq, but test that it runs anyway
trv = self.build(recipestr, "Test")
self.verifyPackageFileList(trv, ['/disttools'])
def testPythonSetupPyVer(self):
recipestr = r"""
class Test(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.macros.pyver = '%(pversion)s'
r.Create('setup.py', contents='\n'.join((
'#from setuptools import setup',
'file("%%(destdir)s/setuptools", "w").close()',
'')))
r.PythonSetup()
"""
# we should fail with a bogus python version
self.assertRaises(RuntimeError, self.build, recipestr %
{'pversion':'bogus'}, "Test")
# we should succeed with a the currently running python version
trv = self.build(recipestr % {'pversion': sys.version[0:3]}, "Test")
assert trv is not None
class TestMakeFIFO(rephelp.RepositoryHelper):
"""test CNY-1597"""
def testMakeFIFOBuildAction(self):
recipestr = """
class FIFO(PackageRecipe):
name = 'fifo'
version = '6.2.2.4'
clearBuildReqs()
def setup(r):
r.MakeFIFO('/path/to/my/spiffy/named/pipe')
r.Run('test -p %(destdir)s/path/to/my/spiffy/named/pipe')
r.MakeFIFO('this/is/a/path/relative/to/builddir/pipe')
r.Run('test -p this/is/a/path/relative/to/builddir/pipe')
"""
self.buildRecipe(recipestr, 'FIFO')
def testPythonSetupNonPure(self):
try:
__import__('setuptools')
except ImportError:
raise testhelp.SkipTestException('Missing python-setuptools package')
recipestr = r"""
class Test(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.macros.lib = 'lib64'
# test pre-existing python-specific multilib breakage
r.Create('%(prefix)s/lib/python2.4/site-packages/a')
r.Create('%(prefix)s/lib64/python2.4/site-packages/b')
# test multilib breakage that purePython=False could help
r.Create('setup.py', contents='\n'.join((
'#import setuptools',
'file("%(destdir)s%(prefix)s/lib/python2.4/site-packages/foo", "w").close()',
'file("%(destdir)s%(prefix)s/lib64/python2.4/site-packages/bar", "w").close()',
'')))
r.PythonSetup()
"""
# missing buildreq, but test that it runs anyway
self.logFilter.add()
self.assertRaises(policy.PolicyError, self.buildRecipe,
recipestr, "Test")
self.logFilter.remove()
assert(os.path.exists(util.joinPaths(self.buildDir,
'test/_ROOT_/usr/lib/python2.4/site-packages/foo')))
assert(os.path.exists(util.joinPaths(self.buildDir,
'test/_ROOT_/usr/lib64/python2.4/site-packages/bar')))
self.logFilter.regexpCompare([
r'error: Python and object files detected in different directories before PythonSetup\(\) instance on line 18',
r'error: Python and object files detected in different directories on line 18; call all instances of PythonSetup\(\) with the purePython=False argument',
'error: NonMultilibComponent: .*',
])
class SetModesTest(rephelp.RepositoryHelper):
def testSetModesTest1(self):
"""
        Test SetModes
"""
recipestr = """
class TestSetModes(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/usr/bin/foo')
r.SetModes('/usr/bin/foo', 04751)
r.Create('/foo/bar')
r.SetModes('/foo/bar', 07755)
r.SetModes('/foo/bar', 0755)
"""
built, d = self.buildRecipe(recipestr, "TestSetModes")
permMap = { '/usr/bin/foo': 04751,
'/foo/bar': 0755 }
repos = self.openRepository()
for troveName, troveVersion, troveFlavor in built:
troveVersion = versions.VersionFromString(troveVersion)
trove = repos.getTrove(troveName, troveVersion, troveFlavor)
for pathId, path, fileId, version, fileObj in repos.iterFilesInTrove(
trove.getName(), trove.getVersion(), trove.getFlavor(),
withFiles=True):
self.assertEquals(fileObj.inode.perms(), permMap[path])
def testSetModesTest2(self):
"""
        Test SetModes
"""
recipestr = """
class TestSetModes(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/usr/bin/foo')
r.SetModes('/usr/bin/foo', 755)
"""
self.logCheck(self.buildRecipe, (recipestr, "TestSetModes"),
'warning: odd permission 1363 for path /usr/bin/foo, correcting to 0755: add initial "0"?')
class TestIncludeLicense(rephelp.RepositoryHelper):
"""test CNY-1656"""
def testIncludeLicenseBuildAction(self):
try:
socket.gethostbyname('www.rpath.com')
except:
raise testhelp.SkipTestException('Test requires networking')
recipestr= """
class License(PackageRecipe):
name = 'license'
version = '1.5.7'
clearBuildReqs()
def setup(r):
r.addSource('CPL-1.0')
r.IncludeLicense(('CPL-1.0', 'CPL-1.0'))
# sha1sum of the relevent license
r.Run('test -f %(destdir)s/%(datadir)s/known-licenses/CPL-1.0/7d2ea178c5858c731bf8a026aeb880b27698b924')
"""
self.buildRecipe(recipestr, 'License')
class EnvironmentTest(rephelp.RepositoryHelper):
def testEnvironment(self):
recipestr = """
class TestEnvironment(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Environment('FOOBAR', 'BAZ')
r.Run('echo "$FOOBAR" > baz')
r.Create('foobar', contents='BAZ')
r.Run('cmp foobar baz')
"""
self.buildRecipe(recipestr, "TestEnvironment")
class TestSharedLibraryPolicy(rephelp.RepositoryHelper):
"""test CNP-45"""
def testSharedLibraryPolicy(self):
recipestr = """
class BadRecipe(AutoPackageRecipe):
name = 'badrecipe'
version = '3'
clearBuildReqs()
def setup(r):
r.Create('/path/to/a/file')
r.SharedLibrary(subtrees = '/path/to/a/file')
"""
self.logFilter.add()
self.assertRaises(policy.PolicyError, self.buildRecipe,
recipestr, "BadRecipe")
self.logFilter.remove()
assert('error: NormalizeLibrarySymlinks: The subtrees= argument takes directories only; /path/to/a/file is not a directory')
class TestMakeFIFO(rephelp.RepositoryHelper):
"""test CNY-1597"""
def testMakeFIFOBuildAction(self):
recipestr = """
class FIFO(PackageRecipe):
name = 'fifo'
version = '6.2.2.4'
clearBuildReqs()
def setup(r):
r.MakeFIFO('/path/to/my/spiffy/named/pipe')
r.Run('test -p %(destdir)s/path/to/my/spiffy/named/pipe')
r.MakeFIFO('this/is/a/path/relative/to/builddir/pipe')
r.Run('test -p this/is/a/path/relative/to/builddir/pipe')
"""
self.buildRecipe(recipestr, 'FIFO')
class TestReportMissingBuildReqs(rephelp.RepositoryHelper):
def testActionSuggestsBuildReqs(self):
# First, add a trove that provides make
self.addComponent("fakemake:runtime", "1", fileContents = [
("/usr/bin/make", "somecontent")])
self.updatePkg(self.rootDir, ["fakemake:runtime"])
# test for Run
self.addComponent("true:runtime", "2", fileContents = [
("/bin/true", "#!/bin/sh\nexit 0")])
self.updatePkg(self.rootDir, ["true:runtime"])
recipestr = """
class ActionSuggests(PackageRecipe):
name = "suggests"
version = "1"
clearBuildReqs()
placeholder = 1
def setup(r):
r.Create("Makefile", contents = '\\n'.join([
"datadir = $(DESTDIR)/usr/share",
"INSTALL = $(datadir)/suggests/suggests.txt",
"all:",
"install:",
"\\tinstall -d $(basename $(INSTALL))",
"\\techo 1 > $(INSTALL)",
]))
r.Make()
# MakePathsInstall and MakeInstall do the same thing in this case,
# but we want to exercise both classes
r.MakePathsInstall()
r.MakeInstall()
# test MakeParallelSubdir too, it doesn't really make any difference
r.MakeParallelSubdir()
r.Run('ENVVAR="a b" true') # CNY-3224
"""
reportedBuildReqs = set()
self.mock(packagepolicy.reportMissingBuildRequires, 'updateArgs',
lambda *args:
mockedUpdateArgs(args[0], reportedBuildReqs, *args[1:]))
self.build(recipestr, 'ActionSuggests')
self.assertEquals(reportedBuildReqs, set(('fakemake:runtime',
'true:runtime')))
# Same deal, with buildRequires added
recipestr2 = recipestr.replace("placeholder = 1",
"buildRequires = ['fakemake:runtime', 'true:runtime']")
reportedBuildReqs.clear()
self.build(recipestr2, 'ActionSuggests')
self.assertEquals(reportedBuildReqs, set())
def testActionSuggestsBuildReqs2(self):
# First, add a trove that provides tar and gz
fakeTroves = ['tar', 'gzip']
for comp in fakeTroves:
self.addComponent("fake%s:runtime" % comp, "1",
fileContents = [ ("/bin/%s" % comp, "%scontent" % comp)])
def checkPath(prog):
return '/bin/' + prog
self.mock(util, 'checkPath', checkPath)
self.updatePkg(self.rootDir, ["fake%s:runtime" % x for x in fakeTroves])
recipestr = """
class ActionSuggests(PackageRecipe):
name = "suggests"
version = "1"
clearBuildReqs()
placeholder = 1
def setup(r):
r.addArchive("foo-1.0.tar.gz", dir="/usr/share/foo/")
"""
reportedBuildReqs = set()
self.mock(packagepolicy.reportMissingBuildRequires, 'updateArgs',
lambda *args:
mockedUpdateArgs(args[0], reportedBuildReqs, *args[1:]))
self.build(recipestr, 'ActionSuggests')
self.assertEquals(reportedBuildReqs, set(['fakegzip:runtime',
'faketar:runtime']))
# Same deal, with buildRequires added
recipestr2 = recipestr.replace("placeholder = 1",
"buildRequires = ['fakegzip:runtime', 'faketar:runtime']")
reportedBuildReqs.clear()
self.build(recipestr2, 'ActionSuggests')
self.assertEquals(reportedBuildReqs, set())
| apache-2.0 | 1,297,650,306,132,612,000 | 32.551958 | 165 | 0.574955 | false |
hylandnp/CS7012_DISTRIBUTED_PROJECT | reducer.py | 1 | 7038 | #Listen and respond to SNMP GET/GETNEXT queries
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asyncore.dgram import udp, udp6, unix
from pyasn1.codec.ber import encoder, decoder
from pysnmp.proto import api
import time, bisect
from src.map_reduce import Mapper, Reducer, Shuffler
from collections import defaultdict
import word_count
import thread
import socket
import json
mapper_1 = "10.0.0.1"
mapper_2 = "10.0.0.2"
reducer = "10.0.0.3"
manager = "10.0.0.4"
class SysDescr:
name = (1,3,6,1,2,1,1,1,0)
def __eq__(self, other): return self.name == other
def __ne__(self, other): return self.name != other
def __lt__(self, other): return self.name < other
def __le__(self, other): return self.name <= other
def __gt__(self, other): return self.name > other
def __ge__(self, other): return self.name >= other
def __call__(self, protoVer):
test = "it it it it ti ti ti ti"
ans = self.word_count(test.strip().split(" "))
# print(str(ans).strip('[]'))
return api.protoModules[protoVer].OctetString(
'Job finished Captain'
)
def group_by_word(self, words):
result = defaultdict(list)
for (word, c) in words:
result[word].append(c)
return result
def map_word(self, word):
return word, 1
def reduce_count(self, word, sequence):
return word, sum(sequence)
def word_count(self, document):
self.mapper = Mapper(self.map_word, document)
self.shuffler = Shuffler(self.group_by_word, self.mapper.run())
self.reducer = Reducer(self.reduce_count, self.shuffler.run().iteritems())
return self.reducer.run()
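# Illustrative sketch (hypothetical helper, not part of the original agent
# code): shows how the Mapper/Shuffler/Reducer pipeline used by SysDescr is
# expected to behave on a tiny document. Defining it is harmless at import
# time; call it manually to inspect the result.
def _word_count_example():
    demo = SysDescr()
    # For "it it it it ti ti ti ti" the reducer should yield one (word, total)
    # pair per distinct word, e.g. ("it", 4) and ("ti", 4).
    return demo.word_count("it it it it ti ti ti ti".split(" "))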
class Uptime:
name = (1,3,6,1,2,1,1,3,0)
birthday = time.time()
def __eq__(self, other): return self.name == other
def __ne__(self, other): return self.name != other
def __lt__(self, other): return self.name < other
def __le__(self, other): return self.name <= other
def __gt__(self, other): return self.name > other
def __ge__(self, other): return self.name >= other
def __call__(self, protoVer):
return api.protoModules[protoVer].TimeTicks(
(time.time()-self.birthday)*100
)
mibInstr = (
SysDescr(), Uptime(), # sorted by object name
)
mibInstrIdx = {}
for mibVar in mibInstr:
mibInstrIdx[mibVar.name] = mibVar
def cbFun(transportDispatcher, transportDomain, transportAddress, wholeMsg):
while wholeMsg:
msgVer = api.decodeMessageVersion(wholeMsg)
if msgVer in api.protoModules:
pMod = api.protoModules[msgVer]
else:
print('Unsupported SNMP version %s' % msgVer)
return
reqMsg, wholeMsg = decoder.decode(
wholeMsg, asn1Spec=pMod.Message(),
)
rspMsg = pMod.apiMessage.getResponse(reqMsg)
rspPDU = pMod.apiMessage.getPDU(rspMsg)
reqPDU = pMod.apiMessage.getPDU(reqMsg)
varBinds = []; pendingErrors = []
errorIndex = 0
# GETNEXT PDU
if reqPDU.isSameTypeWith(pMod.GetNextRequestPDU()):
# Produce response var-binds
for oid, val in pMod.apiPDU.getVarBinds(reqPDU):
errorIndex = errorIndex + 1
# Search next OID to report
nextIdx = bisect.bisect(mibInstr, oid)
if nextIdx == len(mibInstr):
# Out of MIB
varBinds.append((oid, val))
pendingErrors.append(
(pMod.apiPDU.setEndOfMibError, errorIndex)
)
else:
# Report value if OID is found
varBinds.append(
(mibInstr[nextIdx].name, mibInstr[nextIdx](msgVer))
)
elif reqPDU.isSameTypeWith(pMod.GetRequestPDU()):
for oid, val in pMod.apiPDU.getVarBinds(reqPDU):
if oid in mibInstrIdx:
varBinds.append((oid, mibInstrIdx[oid](msgVer)))
else:
# No such instance
varBinds.append((oid, val))
pendingErrors.append(
(pMod.apiPDU.setNoSuchInstanceError, errorIndex)
)
break
else:
# Report unsupported request type
pMod.apiPDU.setErrorStatus(rspPDU, 'genErr')
pMod.apiPDU.setVarBinds(rspPDU, varBinds)
# Commit possible error indices to response PDU
for f, i in pendingErrors:
f(rspPDU, i)
transportDispatcher.sendMessage(
encoder.encode(rspMsg), transportDomain, transportAddress
)
return wholeMsg
transportDispatcher = AsyncoreDispatcher()
transportDispatcher.registerRecvCbFun(cbFun)
# UDP/IPv4
transportDispatcher.registerTransport(
udp.domainName, udp.UdpSocketTransport().openServerMode(('10.0.0.3', 1161))
)
## Local domain socket
#transportDispatcher.registerTransport(
# unix.domainName, unix.UnixSocketTransport().openServerMode('/tmp/snmp-agent')
#)
IP = "10.0.0.3"
port = 1162
def listen_for_data():
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
sock.bind((IP, port))
while 1:
data1_recv = ""
data2_recv = ""
data, addr = sock.recvfrom(8192)
addr1 = addr
try:
while(data):
if addr == addr1:
data1_recv = data1_recv + data
print "get data from 1"
else:
data2_recv = data2_recv + data
print "get data from 2"
sock.settimeout(3)
data, addr = sock.recvfrom(8192)
except socket.timeout:
sock.close()
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
sock.bind((IP, port))
print "reducer got everything"
data1_dict = json.loads(data1_recv)
data2_dict = json.loads(data2_recv)
data_result = data1_dict.copy()
for key in data2_dict.keys():
if key in data_result:
data_result[key] = data_result[key] + data2_dict[key]
else:
data_result[key] = data2_dict[key]
reducer = Reducer(word_count.reduce_count, data_result.iteritems())
result = reducer.run()
print result
file_out = open("result.txt","w")
for word in result:
file_out.write(str(word) + "\n")
file_out.close()
def listen_for_snmp():
transportDispatcher.jobStarted(1)
try:
# Dispatcher will never finish as job#1 never reaches zero
transportDispatcher.runDispatcher()
except:
transportDispatcher.closeDispatcher()
raise
try:
thread.start_new_thread(listen_for_data,())
thread.start_new_thread(listen_for_snmp,())
except:
    print "Error: failed to start worker threads"
    raise
while 1:
pass
| mit | 280,178,073,929,246,180 | 31.583333 | 82 | 0.583262 | false |
pmuellr/nitro_pie | test/test_iss.py | 1 | 3753 | #!/usr/bin/env python
#-------------------------------------------------------------------
# The MIT License
#
# Copyright (c) 2009 Patrick Mueller
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#-------------------------------------------------------------------
import os
import sys
lib_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../lib"))
if lib_path not in sys.path: sys.path.insert(0, lib_path)
import unittest
from nitro_pie import *
from test_utils import *
#-------------------------------------------------------------------
class Test(unittest.TestCase):
#---------------------------------------------------------------
def setUp(self):
self.ctx = JSGlobalContextRef.create()
def tearDown(self):
self.ctx.release()
#---------------------------------------------------------------
def test_isEqual(self):
ctx = self.ctx
o1 = ctx.eval("({})")
o2 = ctx.eval("({})")
self.assertTrue( o1.isEqual(ctx,o1))
self.assertTrue( o2.isEqual(ctx,o2))
self.assertFalse(o1.isEqual(ctx,o2))
#---------------------------------------------------------------
def test_isStrictEqual(self):
ctx = self.ctx
o1 = ctx.eval("({})")
o2 = ctx.eval("({})")
self.assertTrue( o1.isStrictEqual(ctx,o1))
self.assertTrue( o2.isStrictEqual(ctx,o2))
self.assertFalse(o1.isStrictEqual(ctx,o2))
#---------------------------------------------------------------
def test_isInstanceOf(self):
ctx = self.ctx
a = ctx.eval("[]")
array = ctx.eval("Array").asJSObjectRef(ctx)
self.assertTrue(a.isInstanceOf(ctx,array))
#---------------------------------------------------------------
def test_isFunction(self):
ctx = self.ctx
a = ctx.eval("[]").asJSObjectRef(ctx)
f = ctx.eval("(function() {})").asJSObjectRef(ctx)
self.assertFalse(a.isFunction(ctx))
self.assertTrue(f.isFunction(ctx))
#---------------------------------------------------------------
def test_isConstructor(self):
ctx = self.ctx
a = ctx.eval("[]").asJSObjectRef(ctx)
f = ctx.eval("(function() {})").asJSObjectRef(ctx)
array = ctx.eval("Array").asJSObjectRef(ctx)
self.assertFalse(a.isConstructor(ctx))
self.assertTrue(f.isConstructor(ctx))
self.assertTrue(array.isConstructor(ctx))
self.assertTrue(array.isFunction(ctx))
#-------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| mit | 9,065,262,556,464,248,000 | 34.40566 | 80 | 0.52145 | false |
okanokumus/Programming | Python/IntroductionPython/variable_types.py | 1 | 1599 | # https://www.tutorialspoint.com/python/python_variable_types.htm
# Python variables do not need explicit declaration to reserve memory space.
# The declaration happens automatically when you assign a value to a variable.
# The equal sign (=) is used to assign values to variables.
number = 5
weight = 58.9
name = 'python'
print(weight)
print(number)
print(name)
# Multiple assignment
a = b = c = number
print(a * b * c)
string = 'Hello World !!!'
print (string[1:7])
print (string[5:]) # Prints string starting from 6th character
print (string * 2 ) # Prints string two times
print (string + "python")
# Python Lists
list = [ 'abcd', 786 , 2.23, 'john', 70.2 ]
tinylist = [123, 'john']
print (list) # Prints complete list
print (list[1:3]) # Prints elements starting from 2nd till 3rd
print (list[2:]) # Prints elements starting from 3rd element
print (tinylist * 2) # Prints list two times
print (list + tinylist) # Prints concatenated lists
# Python Tuples
# The main differences between lists and tuples are: Lists are enclosed in brackets ( [ ] ) and
# their elements and size can be changed, while tuples are enclosed in parentheses ( ( ) ) and
# cannot be updated. Tuples can be thought of as read-only lists.
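# Extra illustration (not in the original tutorial): item assignment works on
# lists but raises an error on tuples.
demo_list = [1, 2, 3]
demo_list[0] = 9           # fine, lists are mutable
demo_tuple = (1, 2, 3)
try:
    demo_tuple[0] = 9      # tuples are immutable
except TypeError as error:
    print('tuples are read-only:', error)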
tuple = ('abcd', 786, 2.23, 'john', 70.2)
tinytuple = (123, 'john')
print(tuple) # Prints complete tuple
print(tuple[1:3]) # Prints elements starting from 2nd till 3rd
print(tuple[2:]) # Prints elements starting from 3rd element
print(tinytuple * 2) # Prints list two times
print(tuple + tinytuple) # Prints concatenated tuples | gpl-3.0 | -2,425,228,713,200,995,300 | 33.782609 | 95 | 0.699812 | false |
cthoyt/onto2nx | src/onto2nx/ontospy/core/entities.py | 1 | 12106 | # !/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
from itertools import count
from .utils import *
class RDF_Entity(object):
"""
Pythonic representation of an RDF resource - normally not instantiated but used for
inheritance purposes
<triples> : a structure like this:
[(rdflib.term.URIRef(u'http://xmlns.com/foaf/0.1/OnlineChatAccount'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.Literal(u'An online chat account.')),
(rdflib.term.URIRef(u'http://xmlns.com/foaf/0.1/OnlineChatAccount'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#subClassOf')]
"""
_ids = count(0)
def __repr__(self):
return "<OntoSpy: RDF_Entity object for uri *%s*>" % (self.uri)
def __init__(self, uri, rdftype=None, namespaces=None):
"""
Init ontology object. Load the graph in memory, then setup all necessary attributes.
"""
self.id = next(self._ids)
self.uri = uri # rdflib.Uriref
self.qname = self.__buildQname(namespaces)
self.locale = inferURILocalSymbol(self.uri)[0]
self.slug = None
self.rdftype = rdftype
self.triples = None
self.rdfgraph = rdflib.Graph()
self.namespaces = namespaces
self._children = []
self._parents = []
# self.siblings = []
def serialize(self, format="turtle"):
""" xml, n3, turtle, nt, pretty-xml, trix are built in"""
if self.triples:
if not self.rdfgraph:
self._buildGraph()
return self.rdfgraph.serialize(format=format)
else:
return None
def printSerialize(self, format="turtle"):
printDebug("\n" + self.serialize(format))
def __buildQname(self, namespaces):
""" extracts a qualified name for a uri """
return uri2niceString(self.uri, namespaces)
def _buildGraph(self):
"""
transforms the triples list into a proper rdflib graph
(which can be used later for querying)
"""
for n in self.namespaces:
self.rdfgraph.bind(n[0], rdflib.Namespace(n[1]))
if self.triples:
for terzetto in self.triples:
self.rdfgraph.add(terzetto)
# methods added to RDF_Entity even though they apply only to some subs
def ancestors(self, cl=None, noduplicates=True):
""" returns all ancestors in the taxonomy """
if not cl:
cl = self
if cl.parents():
bag = []
for x in cl.parents():
if x.uri != cl.uri: # avoid circular relationships
bag += [x] + self.ancestors(x, noduplicates)
else:
bag += [x]
# finally:
if noduplicates:
return remove_duplicates(bag)
else:
return bag
else:
return []
def descendants(self, cl=None, noduplicates=True):
""" returns all descendants in the taxonomy """
if not cl:
cl = self
if cl.children():
bag = []
for x in cl.children():
if x.uri != cl.uri: # avoid circular relationships
bag += [x] + self.descendants(x, noduplicates)
else:
bag += [x]
# finally:
if noduplicates:
return remove_duplicates(bag)
else:
return bag
else:
return []
def parents(self):
"""wrapper around property"""
return self._parents
def children(self):
"""wrapper around property"""
return self._children
def getValuesForProperty(self, aPropURIRef):
"""
generic way to extract some prop value eg
In [11]: c.getValuesForProperty(rdflib.RDF.type)
Out[11]:
[rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#Class'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#Class')]
"""
return list(self.rdfgraph.objects(None, aPropURIRef))
def bestLabel(self, prefLanguage="en", qname_allowed=True, quotes=True):
"""
        facility for extracting the best available label for an entity.
        This checks RDFS.label, SKOS.prefLabel and finally the qname local component.
"""
test = self.getValuesForProperty(rdflib.RDFS.label)
out = ""
if test:
out = firstEnglishStringInList(test)
else:
test = self.getValuesForProperty(rdflib.namespace.SKOS.prefLabel)
if test:
out = firstEnglishStringInList(test)
else:
if qname_allowed:
out = self.locale
if quotes and out:
return addQuotes(out)
else:
return out
def bestDescription(self, prefLanguage="en"):
"""
        facility for extracting the best available description for an entity.
        This checks RDFS.comment, DCTERMS.description, DC.description and SKOS.definition.
"""
test_preds = [rdflib.RDFS.comment, rdflib.namespace.DCTERMS.description, rdflib.namespace.DC.description,
rdflib.namespace.SKOS.definition]
for pred in test_preds:
test = self.getValuesForProperty(pred)
if test:
return addQuotes(firstEnglishStringInList(test))
return ""
class Ontology(RDF_Entity):
"""
Pythonic representation of an OWL ontology
"""
def __repr__(self):
return "<OntoSpy: Ontology object for uri *%s*>" % (self.uri)
def __init__(self, uri, rdftype=None, namespaces=None, prefPrefix=""):
"""
Init ontology object. Load the graph in memory, then setup all necessary attributes.
"""
super(Ontology, self).__init__(uri, rdftype, namespaces)
# self.uri = uri # rdflib.Uriref
self.prefix = prefPrefix
self.slug = "ontology-" + slugify(self.qname)
self.classes = []
self.properties = []
self.skosConcepts = []
def annotations(self, qname=True):
"""
wrapper that returns all triples for an onto.
By default resources URIs are transformed into qnames
"""
if qname:
return sorted([(uri2niceString(x, self.namespaces)), (uri2niceString(y, self.namespaces)), z] for x, y, z in
self.triples)
else:
return sorted(self.triples)
def describe(self):
""" shotcut to pull out useful info for interactive use """
# self.printGenericTree()
self.printTriples()
self.stats()
def stats(self):
""" shotcut to pull out useful info for interactive use """
printDebug("Classes.....: %d" % len(self.classes))
printDebug("Properties..: %d" % len(self.properties))
class OntoClass(RDF_Entity):
"""
Python representation of a generic class within an ontology.
Includes methods for representing and querying RDFS/OWL classes
domain_of_inferred: a list of dict
[{<Class *http://xmlns.com/foaf/0.1/Person*>:
[<Property *http://xmlns.com/foaf/0.1/currentProject*>,<Property *http://xmlns.com/foaf/0.1/familyName*>,
etc....]},
{<Class *http://www.w3.org/2003/01/geo/wgs84_pos#SpatialThing*>:
[<Property *http://xmlns.com/foaf/0.1/based_near*>, etc...]},
]
"""
def __init__(self, uri, rdftype=None, namespaces=None):
"""
...
"""
super(OntoClass, self).__init__(uri, rdftype, namespaces)
self.slug = "class-" + slugify(self.qname)
self.domain_of = []
self.range_of = []
self.domain_of_inferred = []
self.range_of_inferred = []
self.ontology = None
self.queryHelper = None # the original graph the class derives from
def __repr__(self):
return "<Class *%s*>" % (self.uri)
def instances(self): # = all instances
return self.all()
def all(self):
out = []
if self.queryHelper:
qres = self.queryHelper.getClassInstances(self.uri)
out = [x[0] for x in qres]
return out
def count(self):
if self.queryHelper:
return self.queryHelper.getClassInstancesCount(self.uri)
else:
return 0
def printStats(self):
""" shotcut to pull out useful info for interactive use """
printDebug("----------------")
printDebug("Parents......: %d" % len(self.parents()))
printDebug("Children.....: %d" % len(self.children()))
printDebug("Ancestors....: %d" % len(self.ancestors()))
printDebug("Descendants..: %d" % len(self.descendants()))
printDebug("Domain of....: %d" % len(self.domain_of))
printDebug("Range of.....: %d" % len(self.range_of))
printDebug("Instances....: %d" % self.count())
printDebug("----------------")
def describe(self):
""" shotcut to pull out useful info for interactive use """
self.printTriples()
self.printStats()
# self.printGenericTree()
class OntoProperty(RDF_Entity):
"""
Python representation of a generic RDF/OWL property.
rdftype is one of:
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#ObjectProperty')
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#DatatypeProperty')
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#AnnotationProperty')
rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Property')
"""
def __init__(self, uri, rdftype=None, namespaces=None):
"""
...
"""
super(OntoProperty, self).__init__(uri, rdftype, namespaces)
self.slug = "prop-" + slugify(self.qname)
self.rdftype = inferMainPropertyType(rdftype)
self.domains = []
self.ranges = []
self.ontology = None
def __repr__(self):
return "<Property *%s*>" % (self.uri)
def printStats(self):
""" shotcut to pull out useful info for interactive use """
printDebug("----------------")
printDebug("Parents......: %d" % len(self.parents()))
printDebug("Children.....: %d" % len(self.children()))
printDebug("Ancestors....: %d" % len(self.ancestors()))
printDebug("Descendants..: %d" % len(self.descendants()))
printDebug("Has Domain...: %d" % len(self.domains))
printDebug("Has Range....: %d" % len(self.ranges))
printDebug("----------------")
def describe(self):
""" shotcut to pull out useful info for interactive use """
self.printTriples()
self.printStats()
# self.printGenericTree()
class OntoSKOSConcept(RDF_Entity):
"""
Python representation of a generic SKOS concept within an ontology.
@todo: complete methods..
"""
def __init__(self, uri, rdftype=None, namespaces=None):
"""
...
"""
super(OntoSKOSConcept, self).__init__(uri, rdftype, namespaces)
self.slug = "concept-" + slugify(self.qname)
self.instance_of = []
self.ontology = None
self.queryHelper = None # the original graph the class derives from
def __repr__(self):
return "<SKOS Concept *%s*>" % (self.uri)
def printStats(self):
""" shotcut to pull out useful info for interactive use """
printDebug("----------------")
printDebug("Parents......: %d" % len(self.parents()))
printDebug("Children.....: %d" % len(self.children()))
printDebug("Ancestors....: %d" % len(self.ancestors()))
printDebug("Descendants..: %d" % len(self.descendants()))
printDebug("----------------")
def describe(self):
""" shotcut to pull out useful info for interactive use """
self.printTriples()
self.printStats()
self.printGenericTree()
| gpl-3.0 | -5,157,829,616,975,822,000 | 32.076503 | 120 | 0.566331 | false |
UASLab/ImageAnalysis | scripts/archive/5b-solver5.py | 1 | 23350 | #!/usr/bin/python
# 1. Iterate through all the image pairs and triangulate the match points.
# 2. Set the 3d location of features to triangulated position (possibly
# averaged if the feature is included in multiple matches
# 3. Compute new camera poses with solvePnP() using triangulated point locations
# 4. Repeat
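#
# Schematic of the main loop implemented below (illustration only; the names
# are the functions defined later in this script):
#
#   while not done:
#       cam_dict = solvePnP(matches_direct)       # refit camera poses (step 3)
#       A = get_recenter_affine(cam_dict)         # best-fit back to original poses
#       transform_cams(A, cam_dict)
#       my_triangulate(matches_direct, cam_dict)  # refit 3d points (steps 1-2)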
import sys
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")
import argparse
import commands
import cPickle as pickle
import cv2
import fnmatch
import json
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os.path
from progress.bar import Bar
import scipy.spatial
sys.path.append('../lib')
import Matcher
import Pose
import ProjectMgr
import SRTM
import transformations
# constants
d2r = math.pi / 180.0
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--strategy', default='my_triangulate',
choices=['my_triangulate', 'triangulate', 'dem'], help='projection strategy')
parser.add_argument('--iterations', type=int, help='stop after this many solver iterations')
parser.add_argument('--target-mre', type=float, help='stop when mre meets this threshold')
parser.add_argument('--plot', action='store_true', help='plot the solution state')
args = parser.parse_args()
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
proj.undistort_keypoints()
matches_direct = pickle.load( open( args.project + "/matches_direct", "rb" ) )
print "unique features:", len(matches_direct)
# compute keypoint usage map
proj.compute_kp_usage_new(matches_direct)
# setup SRTM ground interpolator
ref = proj.ned_reference_lla
sss = SRTM.NEDGround( ref, 2000, 2000, 30 )
start_mre = -1.0
# iterate through the matches list and triangulate the 3d location for
# all feature points, given the associated camera poses. Updates the
# point positions in matches_direct in place.
import LineSolver
def my_triangulate(matches_direct, cam_dict):
IK = np.linalg.inv( proj.cam.get_K() )
for match in matches_direct:
#print match
points = []
vectors = []
for m in match[1:]:
image = proj.image_list[m[0]]
cam2body = image.get_cam2body()
body2ned = image.rvec_to_body2ned(cam_dict[image.name]['rvec'])
uv_list = [ image.uv_list[m[1]] ] # just one uv element
vec_list = proj.projectVectors(IK, body2ned, cam2body, uv_list)
points.append( cam_dict[image.name]['ned'] )
vectors.append( vec_list[0] )
#print ' ', image.name
#print ' ', uv_list
#print ' ', vec_list
p = LineSolver.ls_lines_intersection(points, vectors, transpose=True).tolist()
#print p, p[0]
match[0] = [ p[0][0], p[1][0], p[2][0] ]
# iterate through the project image list and triangulate the 3d
# location for all feature points, given the current camera pose.
# Returns the match list with updated point positions
def triangulate(matches_direct, cam_dict):
IK = np.linalg.inv( proj.cam.get_K() )
match_pairs = proj.generate_match_pairs(matches_direct)
# zero the match NED coordinate and initialize the corresponding
# count array
counters = []
for match in matches_direct:
match[0] = np.array( [0.0, 0.0, 0.0] )
counters.append( 0)
for i, i1 in enumerate(proj.image_list):
#rvec1, tvec1 = i1.get_proj()
rvec1 = cam_dict[i1.name]['rvec']
tvec1 = cam_dict[i1.name]['tvec']
R1, jac = cv2.Rodrigues(rvec1)
PROJ1 = np.concatenate((R1, tvec1), axis=1)
for j, i2 in enumerate(proj.image_list):
matches = match_pairs[i][j]
if (j <= i) or (len(matches) == 0):
continue
# distance between two cameras
ned1 = np.array(cam_dict[i1.name]['ned'])
ned2 = np.array(cam_dict[i2.name]['ned'])
dist = np.linalg.norm(ned2 - ned1)
if dist < 40:
# idea: the closer together two poses are, the greater
# the triangulation error will be relative to small
                # attitude errors. If we only compare more distant
# camera views the solver will be more stable.
continue
#rvec2, tvec2 = i2.get_proj()
rvec2 = cam_dict[i2.name]['rvec']
tvec2 = cam_dict[i2.name]['tvec']
R2, jac = cv2.Rodrigues(rvec2)
PROJ2 = np.concatenate((R2, tvec2), axis=1)
uv1 = []; uv2 = []; indices = []
for pair in matches:
p1 = i1.kp_list[ pair[0] ].pt
p2 = i2.kp_list[ pair[1] ].pt
uv1.append( [p1[0], p1[1], 1.0] )
uv2.append( [p2[0], p2[1], 1.0] )
# pair[2] is the index back into the matches_direct structure
indices.append( pair[2] )
pts1 = IK.dot(np.array(uv1).T)
pts2 = IK.dot(np.array(uv2).T)
points = cv2.triangulatePoints(PROJ1, PROJ2, pts1[:2], pts2[:2])
points /= points[3]
#print "points:\n", points[0:3].T
# fixme: need to update result, sum_dict is no longer used
print "%s vs %s" % (i1.name, i2.name)
for k, p in enumerate(points[0:3].T):
match = matches_direct[indices[k]]
match[0] += p
counters[indices[k]] += 1
# divide each NED coordinate (sum of triangulated point locations)
# of matches_direct_dict by the count of references to produce an
# average NED coordinate for each match.
for i, match in enumerate(matches_direct):
if counters[i] > 0:
match[0] /= counters[i]
else:
print 'invalid match from images too close to each other:', match
for j in range(1, len(match)):
match[j] = [-1, -1]
# return the new match structure
return matches_direct
# Iterate through the project image list and run solvePnP on each
# image's feature set to derive new estimated camera locations
cam1 = []
def solvePnP(matches_direct):
# start with a clean slate
for image in proj.image_list:
image.img_pts = []
image.obj_pts = []
# build a new cam_dict that is a copy of the current one
cam_dict = {}
for image in proj.image_list:
cam_dict[image.name] = {}
rvec, tvec = image.get_proj()
ned, ypr, quat = image.get_camera_pose()
cam_dict[image.name]['rvec'] = rvec
cam_dict[image.name]['tvec'] = tvec
cam_dict[image.name]['ned'] = ned
# iterate through the match dictionary and build a per image list of
# obj_pts and img_pts
for match in matches_direct:
ned = match[0]
for p in match[1:]:
image = proj.image_list[ p[0] ]
kp = image.kp_list[ p[1] ]
image.img_pts.append( kp.pt )
image.obj_pts.append( ned )
camw, camh = proj.cam.get_image_params()
for image in proj.image_list:
# print image.name
if len(image.img_pts) < 4:
continue
scale = float(image.width) / float(camw)
K = proj.cam.get_K(scale)
rvec, tvec = image.get_proj()
(result, rvec, tvec) \
= cv2.solvePnP(np.float32(image.obj_pts),
np.float32(image.img_pts),
K, None,
rvec, tvec, useExtrinsicGuess=True)
# The idea of using the Ransac version of solvePnP() is to
# look past outliers instead of being affected by them. We
# don't use the outlier information at this point in the
# process for outlier rejection. However, it appears that
# this process leads to divergence, not convergence.
# (rvec, tvec, inliers) \
# = cv2.solvePnPRansac(np.float32(image.obj_pts),
# np.float32(image.img_pts),
# K, None,
# rvec, tvec, useExtrinsicGuess=True)
#print "rvec=", rvec
#print "tvec=", tvec
Rned2cam, jac = cv2.Rodrigues(rvec)
#print "Rraw (from SolvePNP):\n", Rraw
ned = image.camera_pose['ned']
#print "original ned = ", ned
#tvec = -np.matrix(R[:3,:3]) * np.matrix(ned).T
#print "tvec =", tvec
pos = -np.matrix(Rned2cam[:3,:3]).T * np.matrix(tvec)
newned = pos.T[0].tolist()[0]
#print "new ned =", newned
# Our Rcam matrix (in our ned coordinate system) is body2cam * Rned,
# so solvePnP returns this combination. We can extract Rned by
# premultiplying by cam2body aka inv(body2cam).
cam2body = image.get_cam2body()
Rned2body = cam2body.dot(Rned2cam)
#print "R (after M * R):\n", R
ypr = image.camera_pose['ypr']
#print "original ypr = ", ypr
Rbody2ned = np.matrix(Rned2body).T
IRo = transformations.euler_matrix(ypr[0]*d2r, ypr[1]*d2r, ypr[2]*d2r, 'rzyx')
IRq = transformations.quaternion_matrix(image.camera_pose['quat'])
#print "Original IR:\n", IRo
#print "Original IR (from quat)\n", IRq
#print "IR (from SolvePNP):\n", IR
(yaw, pitch, roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')
#print "ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
#image.set_camera_pose( pos.T[0].tolist(), [yaw/d2r, pitch/d2r, roll/d2r] )
#print "Proj =", np.concatenate((R, tvec), axis=1)
cam_dict[image.name] = {}
cam_dict[image.name]['rvec'] = rvec
cam_dict[image.name]['tvec'] = tvec
cam_dict[image.name]['ned'] = newned
return cam_dict
# return a 3d affine transformation between fitted camera locations and
# original camera locations.
def get_recenter_affine(cam_dict):
src = [[], [], [], []] # current camera locations
dst = [[], [], [], []] # original camera locations
for image in proj.image_list:
if image.feature_count > 0:
newned = cam_dict[image.name]['ned']
src[0].append(newned[0])
src[1].append(newned[1])
src[2].append(newned[2])
src[3].append(1.0)
origned, ypr, quat = image.get_camera_pose()
dst[0].append(origned[0])
dst[1].append(origned[1])
dst[2].append(origned[2])
dst[3].append(1.0)
#print image.name, '%s -> %s' % (origned, newned)
A = transformations.superimposition_matrix(src, dst, scale=True)
print "Affine 3D:\n", A
return A
# transform the camera ned positions with the provided affine matrix
# to keep all the camera poses best fitted to the original camera
# locations. Also rotate the camera poses by the rotational portion
# of the affine matrix to update the camera alignment.
def transform_cams(A, cam_dict):
# construct an array of camera positions
src = [[], [], [], []]
for image in proj.image_list:
new = cam_dict[image.name]['ned']
src[0].append(new[0])
src[1].append(new[1])
src[2].append(new[2])
src[3].append(1.0)
# extract the rotational portion of the affine matrix
scale, shear, angles, trans, persp = transformations.decompose_matrix(A)
R = transformations.euler_matrix(*angles)
#print "R:\n", R
# full transform the camera ned positions to best align with
# original locations
update_cams = A.dot( np.array(src) )
#print update_cams[:3]
for i, p in enumerate(update_cams.T):
key = proj.image_list[i].name
if not key in cam_dict:
cam_dict[key] = {}
ned = [ p[0], p[1], p[2] ]
# print "ned:", ned
cam_dict[key]['ned'] = ned
# adjust the camera projection matrix (rvec) to rotate by the
# amount of the affine transformation as well
rvec = cam_dict[key]['rvec']
tvec = cam_dict[key]['tvec']
Rcam, jac = cv2.Rodrigues(rvec)
# print "Rcam:\n", Rcam
Rcam_new = R[:3,:3].dot(Rcam)
# print "Rcam_new:\n", Rcam_new
rvec, jac = cv2.Rodrigues(Rcam_new)
cam_dict[key]['rvec'] = rvec
tvec = -np.matrix(Rcam_new) * np.matrix(ned).T
cam_dict[key]['tvec'] = tvec
# transform all the match point locations
def transform_points( A, pts_dict ):
src = [[], [], [], []]
for key in pts_dict:
p = pts_dict[key]
src[0].append(p[0])
src[1].append(p[1])
src[2].append(p[2])
src[3].append(1.0)
dst = A.dot( np.array(src) )
result_dict = {}
for i, key in enumerate(pts_dict):
result_dict[key] = [ dst[0][i], dst[1][i], dst[2][i] ]
return result_dict
# mark items that exceed the cutoff reprojection error for deletion
def mark_outliers(result_list, cutoff, matches_direct):
print " marking outliers..."
mark_count = 0
for line in result_list:
# print "line:", line
if line[0] > cutoff:
print " outlier index %d-%d err=%.2f" % (line[1], line[2],
line[0])
#if args.show:
# draw_match(line[1], line[2])
match = matches_direct[line[1]]
match[line[2]+1] = [-1, -1]
mark_count += 1
# mark matches not referencing images in the main group
def mark_non_group(main_group, matches_direct):
# construct set of image indices in main_group
group_dict = {}
for image in main_group:
for i, i1 in enumerate(proj.image_list):
if image == i1:
group_dict[i] = True
#print 'group_dict:', group_dict
print " marking non group..."
mark_sum = 0
for match in matches_direct:
for j, p in enumerate(match[1:]):
if not p[0] in group_dict:
match[j+1] = [-1, -1]
mark_sum += 1
print 'marked:', mark_sum, 'matches for deletion'
# delete marked matches
def delete_marked_matches(matches_direct):
print " deleting marked items..."
for i in reversed(range(len(matches_direct))):
match = matches_direct[i]
has_bad_elem = False
for j in reversed(range(1, len(match))):
p = match[j]
if p == [-1, -1]:
has_bad_elem = True
match.pop(j)
if len(match) < 4:
print "deleting match that is now in less than 3 images:", match
matches_direct.pop(i)
# any image with less than 25 matches has all its matches marked for
# deletion
def mark_weak_images(matches_direct):
# count how many features show up in each image
for i in proj.image_list:
i.feature_count = 0
for i, match in enumerate(matches_direct):
for j, p in enumerate(match[1:]):
if p[1] != [-1, -1]:
image = proj.image_list[ p[0] ]
image.feature_count += 1
# make a dict of all images with less than 25 feature matches
weak_dict = {}
for i, img in enumerate(proj.image_list):
if img.feature_count < 25:
weak_dict[i] = True
if img.feature_count > 0:
print 'new weak image:', img.name
img.feature_count = 0 # will be zero very soon
print 'weak images:', weak_dict
# mark any features in the weak images list
mark_sum = 0
for i, match in enumerate(matches_direct):
#print 'before:', match
for j, p in enumerate(match[1:]):
if p[0] in weak_dict:
match[j+1] = [-1, -1]
mark_sum += 1
#print 'after:', match
def plot(surface0, cam0, surface1, cam1):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = []; ys = []; zs = []
for p in surface0:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='r', marker='.')
xs = []; ys = []; zs = []
for p in surface1:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='b', marker='.')
xs = []; ys = []; zs = []
for p in cam0:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='y', marker='^')
xs = []; ys = []; zs = []
for p in cam1:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='b', marker='^')
plt.show()
# temporary testing ....
# match_pairs = proj.generate_match_pairs(matches_direct)
# group_list = Matcher.groupByConnections(proj.image_list, matches_direct, match_pairs)
# mark_non_group(group_list[0], matches_direct)
# quit()
# iterate through the match dictionary and build a simple list of
# starting surface points
surface0 = []
for match in matches_direct:
ned = match[0]
surface0.append( [ned[1], ned[0], -ned[2]] )
cam0 = []
for image in proj.image_list:
ned, ypr, quat = image.get_camera_pose()
cam0.append( [ned[1], ned[0], -ned[2]] )
# iterate through the image list and build the camera pose dictionary
# (and a simple list of camera locations for plotting)
# cam_dict = {}
# for image in proj.image_list:
# rvec, tvec, ned = image.get_proj()
# cam_dict[image.name] = {}
# cam_dict[image.name]['rvec'] = rvec
# cam_dict[image.name]['tvec'] = tvec
# cam_dict[image.name]['ned'] = ned
count = 0
while True:
# find the 'best fit' camera poses for the triangulation averaged
# together.
cam_dict = solvePnP(matches_direct)
# measure our current mean reprojection error and trim mre
# outliers from the match set (any points with mre 4x stddev) as
# well as any weak images with < 25 matches.
(result_list, mre, stddev) \
= proj.compute_reprojection_errors(cam_dict, matches_direct)
if start_mre < 0.0: start_mre = mre
print "mre = %.4f stddev = %.4f features = %d" % (mre, stddev, len(matches_direct))
cull_outliers = False
if cull_outliers:
mark_outliers(result_list, mre + stddev*4, matches_direct)
mark_weak_images(matches_direct)
delete_marked_matches(matches_direct)
        # after outlier deletion, re-evaluate matched pairs and connection
# cycles.
match_pairs = proj.generate_match_pairs(matches_direct)
group_list = Matcher.groupByConnections(proj.image_list, matches_direct, match_pairs)
mark_non_group(group_list[0], matches_direct)
delete_marked_matches(matches_direct)
else:
# keep accounting structures happy
mark_weak_images(matches_direct)
# get the affine transformation required to bring the new camera
    # locations back into a best fit with the original camera
# locations
A = get_recenter_affine(cam_dict)
# thought #1: if we are triangulating, this could be done once at the
# end to fix up the solution, not every iteration? But it doesn't
# seem to harm the triangulation.
# thought #2: if we are projecting onto the dem surface, we
# probably shouldn't transform the cams back to the original
# because this could perpetually pull things out of convergence
transform_cams(A, cam_dict)
if args.strategy == 'my_triangulate':
# run the triangulation step (modifies NED coordinates in
# place). This computes a best fit for all the feature
# locations based on the current best camera poses.
my_triangulate(matches_direct, cam_dict)
elif args.strategy == 'triangulate':
# run the triangulation step (modifies NED coordinates in
# place). This computes a best fit for all the feature
# locations based on the current best camera poses.
triangulate(matches_direct, cam_dict)
elif args.strategy == 'dem':
# project the keypoints back onto the DEM surface from the
# updated camera poses.
proj.fastProjectKeypointsTo3d(sss, cam_dict)
# estimate new world coordinates for each match point
for match in matches_direct:
sum = np.array( [0.0, 0.0, 0.0] )
for p in match[1:]:
sum += proj.image_list[ p[0] ].coord_list[ p[1] ]
ned = sum / len(match[1:])
# print "avg =", ned
match[0] = ned.tolist()
else:
print 'unknown triangulation strategy, script will probably fail to do anything useful'
surface1 = []
for match in matches_direct:
ned = match[0]
print ned
surface1.append( [ned[1], ned[0], -ned[2]] )
# transform all the feature points by the affine matrix (modifies
# matches_direct NED coordinates in place)
# fixme: transform_points(A, matches_direct)
# fixme: transform camera locations and orientations as well
# run solvePnP now on the updated points (hopefully this will
# naturally reorient the cameras as needed.)
# 9/6/2016: shouldn't be needed since transform_points() now rotates
# the camera orientation as well?
# cam_dict = solvePnP(newpts_dict)
cam1 = []
for key in cam_dict:
p = cam_dict[key]['ned']
cam1.append( [ p[1], p[0], -p[2] ] )
if args.plot:
plot(surface0, cam0, surface1, cam1)
count += 1
# test stop conditions
if args.iterations:
if count >= args.iterations:
print 'Stopping (by request) after', count, 'iterations.'
break
elif args.target_mre:
if mre <= args.target_mre:
print 'Stopping (by request) with mre:', mre
break
else:
print 'No stop condition specified, running one iteration and stopping.'
break
(result_list, mre, stddev) \
= proj.compute_reprojection_errors(cam_dict, matches_direct)
print 'Start mre:', start_mre, 'end mre:', mre
result=raw_input('Update matches and camera poses? (y/n):')
if result == 'y' or result == 'Y':
print 'Writing direct matches...'
pickle.dump(matches_direct, open(args.project+"/matches_direct", "wb"))
print 'Updating and saving camera poses...'
for image in proj.image_list:
pose = cam_dict[image.name]
Rned2cam, jac = cv2.Rodrigues(pose['rvec'])
pos = -np.matrix(Rned2cam[:3,:3]).T * np.matrix(pose['tvec'])
ned = pos.T[0].tolist()[0]
# Our Rcam matrix (in our ned coordinate system) is body2cam * Rned,
# so solvePnP returns this combination. We can extract Rned by
# premultiplying by cam2body aka inv(body2cam).
cam2body = image.get_cam2body()
Rned2body = cam2body.dot(Rned2cam)
Rbody2ned = np.matrix(Rned2body).T
(yaw, pitch, roll) \
= transformations.euler_from_matrix(Rbody2ned, 'rzyx')
# print "ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
print 'orig:', image.get_camera_pose()
image.set_camera_pose( ned, [yaw/d2r, pitch/d2r, roll/d2r] )
print 'new: ', image.get_camera_pose()
image.save_meta()
| mit | -8,557,831,333,197,678,000 | 36.479936 | 97 | 0.592248 | false |
google-research/google-research | learning_parameter_allocation/data.py | 1 | 16445 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset utilities for MNIST and Omniglot datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
_ALL_SPLITS = ['train', 'validation', 'test']
def get_all_datapoints(dataset):
"""Returns all datapoints in a dataset.
Args:
    dataset: (tf.data.Dataset) dataset to read the datapoints from.
Returns:
A list of datapoints returned from the dataset.
"""
session = tf.Session(graph=tf.get_default_graph())
iterator = dataset.make_one_shot_iterator().get_next()
data = []
try:
while True:
data.append(session.run(iterator))
except tf.errors.OutOfRangeError:
pass
return data
def convert_list_to_dataset(data):
"""Shuffles a list of datapoints and converts it into tf.data.Dataset.
Args:
data: (list of dicts) list of datapoints, each being a dict containing
keys 'image' and 'label'.
Returns:
A tf.data.Dataset containing the datapoints from `data` in random order.
"""
np.random.shuffle(data)
images = np.array([datapoint['image'] for datapoint in data])
labels = np.array([datapoint['label'] for datapoint in data])
# Convert into a tf.data.Dataset.
data = tf.data.Dataset.from_tensor_slices((images, labels))
# Convert the datapoints from pairs back to dicts.
data = data.map(lambda image, label: {'image': image, 'label': label})
return data.cache()
def group_by_label(datapoints, num_labels):
"""Groups a list of datapoints by the classification label.
Args:
datapoints: (list of dicts) list of datapoints, each being a dict containing
keys 'image' and 'label'.
num_labels: (int) number of labels.
Returns:
A list of lists, containing all elements of `datapoints` grouped by
the 'label' key.
"""
data_grouped_by_label = [[] for _ in range(num_labels)]
for datapoint in datapoints:
data_grouped_by_label[datapoint['label']].append(datapoint)
return data_grouped_by_label
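def _group_by_label_example():
  """Illustrative sketch with made-up datapoints (not used by the library).

  group_by_label buckets datapoints by their 'label' key: three datapoints
  with labels [0, 2, 0] and num_labels=3 yield group sizes [2, 0, 1].
  """
  datapoints = [{'image': None, 'label': 0},
                {'image': None, 'label': 2},
                {'image': None, 'label': 0}]
  groups = group_by_label(datapoints, num_labels=3)
  return [len(group) for group in groups]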
def get_mnist():
"""Loads the MNIST dataset.
Returns:
A pair of:
- a dictionary with keys 'train' and 'test', containing `tf.data.Dataset`s
for train and test, respectively.
- an integer denoting the number of classes in the dataset
"""
dataset = tfds.load('mnist')
train_dataset = dataset['train'].map(normalize).cache()
test_dataset = dataset['test'].map(normalize).cache()
return {
'train': train_dataset.shuffle(buffer_size=60000),
'test': test_dataset
}, 10
def get_mnist_in_cifar_format():
"""Loads the MNIST dataset, converts the inputs to 32x32 RGB images.
Returns:
A pair of:
- a dictionary with keys 'train' and 'test', containing `tf.data.Dataset`s
for train and test, respectively.
- an integer denoting the number of classes in the dataset
"""
task_data, num_classes = get_mnist()
return convert_format_mnist_to_cifar(task_data), num_classes
def get_rotated_mnist():
"""Loads the MNIST dataset with each input image rotated by 90 degrees.
Returns:
A pair of:
- a dictionary with keys 'train' and 'test', containing `tf.data.Dataset`s
for train and test, respectively.
- an integer denoting the number of classes in the dataset
"""
dataset = tfds.load('mnist')
train_dataset = dataset['train'].map(normalize).map(rotate90).cache()
test_dataset = dataset['test'].map(normalize).map(rotate90).cache()
return {
'train': train_dataset.shuffle(buffer_size=60000),
'test': test_dataset
}, 10
def get_fashion_mnist():
"""Loads the Fashion-MNIST dataset.
Returns:
A pair of:
- a dictionary with keys 'train' and 'test', containing `tf.data.Dataset`s
for train and test, respectively.
- an integer denoting the number of classes in the dataset
"""
dataset = tfds.load('fashion_mnist')
train_dataset = dataset['train'].map(normalize).cache()
test_dataset = dataset['test'].map(normalize).cache()
return {
'train': train_dataset.shuffle(buffer_size=60000),
'test': test_dataset
}, 10
def get_fashion_mnist_in_cifar_format():
"""Loads the Fashion-MNIST dataset, converts the inputs to 32x32 RGB images.
Returns:
A pair of:
- a dictionary with keys 'train' and 'test', containing `tf.data.Dataset`s
for train and test, respectively.
- an integer denoting the number of classes in the dataset
"""
task_data, num_classes = get_fashion_mnist()
return convert_format_mnist_to_cifar(task_data), num_classes
def get_leave_one_out_classification(task_data, num_classes, leave_out_class):
"""Creates a task of telling apart all classes besides one.
Args:
task_data: (dict) dictionary containing `tf.data.Dataset`s, for example
as returned from `get_mnist`.
num_classes: (int) number of classification classes in the original task.
leave_out_class: (int) id of the class that should be left out in
the returned task.
Returns:
A pair of:
- a dictionary containing `tf.data.Dataset`s for the new task.
- an integer denoting the number of classes in the new task.
"""
task_data = task_data.copy()
def convert_label(data):
data['label'] -= tf.cast(
tf.math.greater(data['label'], leave_out_class), dtype=tf.int64)
return data
def is_good_class(data):
if tf.math.equal(data['label'], leave_out_class):
return False
else:
return True
for split in task_data:
task_data[split] = task_data[split].filter(is_good_class)
task_data[split] = task_data[split].map(convert_label)
task_data[split] = task_data[split].cache()
return task_data, num_classes - 1
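# Worked example (illustration only): with num_classes=10 and leave_out_class=3,
# datapoints labeled 3 are filtered out and every label above 3 shifts down by
# one (original label 7 becomes 6), leaving a 9-way task with labels 0..8.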
def convert_format_mnist_to_cifar(task_data):
"""Converts a dataset of MNIST-like grayscale images to 32x32 rgb images.
Args:
task_data: (dict) dictionary containing `tf.data.Dataset`s, for example
as returned from `get_mnist`.
Returns:
The `task_data` dict after conversion.
"""
task_data = task_data.copy()
for split in task_data:
task_data[split] = task_data[split].map(resize((32, 32)))
task_data[split] = task_data[split].map(convert_to_rgb)
task_data[split] = task_data[split].cache()
return task_data
def get_cifar100(coarse_label_id):
"""Loads one of the CIFAR-100 coarse label tasks.
Args:
coarse_label_id: (int) coarse label id, must be between 0 and 19 inclusive.
Returns:
A pair of:
- a dictionary with keys: 'train', 'validation' and 'test'. Values for
these keys are `tf.data.Dataset`s for train, validation and test,
respectively.
- an integer denoting the number of classes in the dataset
"""
assert 0 <= coarse_label_id < 20
def pred(datapoint):
return tf.math.equal(datapoint['coarse_label'], coarse_label_id)
dataset = tfds.load(
name='cifar100', as_dataset_kwargs={'shuffle_files': False})
def preprocess(dataset_split):
"""Preprocess the input dataset."""
dataset_split = dataset_split.filter(pred)
dataset_split = dataset_split.map(normalize)
all_datapoints = get_all_datapoints(dataset_split)
fine_labels = set([datapoint['label'] for datapoint in all_datapoints])
fine_labels = sorted(list(fine_labels))
assert len(fine_labels) == 5
split_size = len(all_datapoints)
formatter = get_cifar100_formatter(fine_labels)
dataset_split = dataset_split.map(formatter)
dataset_split = dataset_split.cache()
return dataset_split, split_size
train_dataset, train_size = preprocess(dataset['train'])
test_dataset, _ = preprocess(dataset['test'])
raw_train_data = []
raw_valid_data = []
for data_group in group_by_label(get_all_datapoints(train_dataset), 5):
group_size = len(data_group)
# Make sure that the datapoints can be divided evenly.
assert group_size % 5 == 0
np.random.shuffle(data_group)
train_size = int(0.8 * group_size)
raw_train_data += data_group[:train_size]
raw_valid_data += data_group[train_size:]
train_dataset = convert_list_to_dataset(raw_train_data)
valid_dataset = convert_list_to_dataset(raw_valid_data)
return {
'train': train_dataset.shuffle(buffer_size=len(raw_train_data)),
'validation': valid_dataset.shuffle(buffer_size=len(raw_valid_data)),
'test': test_dataset
}, 5
def get_omniglot_order():
"""Returns the omniglot alphabet names, in the order used in previous works.
Returns:
Alphabet names of the 50 Omniglot tasks, in the same order as used by
multiple previous works, such as "Diversity and Depth in Per-Example
Routing Models" (https://openreview.net/pdf?id=BkxWJnC9tX).
"""
return [
'Gujarati', 'Sylheti', 'Arcadian', 'Tibetan',
'Old_Church_Slavonic_(Cyrillic)', 'Angelic', 'Malay_(Jawi_-_Arabic)',
'Sanskrit', 'Cyrillic', 'Anglo-Saxon_Futhorc', 'Syriac_(Estrangelo)',
'Ge_ez', 'Japanese_(katakana)', 'Keble', 'Manipuri',
'Alphabet_of_the_Magi', 'Gurmukhi', 'Korean', 'Early_Aramaic',
'Atemayar_Qelisayer', 'Tagalog', 'Mkhedruli_(Georgian)',
'Inuktitut_(Canadian_Aboriginal_Syllabics)', 'Tengwar', 'Hebrew', 'N_Ko',
'Grantha', 'Latin', 'Syriac_(Serto)', 'Tifinagh', 'Balinese', 'Mongolian',
'ULOG', 'Futurama', 'Malayalam', 'Oriya',
'Ojibwe_(Canadian_Aboriginal_Syllabics)', 'Avesta', 'Kannada', 'Bengali',
'Japanese_(hiragana)', 'Armenian', 'Aurek-Besh', 'Glagolitic',
'Asomtavruli_(Georgian)', 'Greek', 'Braille', 'Burmese_(Myanmar)',
'Blackfoot_(Canadian_Aboriginal_Syllabics)', 'Atlantean'
]
def map_omniglot_alphabet_names_to_ids(alphabet_names):
"""Maps a list of Omniglot alphabet names into their ids.
Args:
alphabet_names: (list of strings) names of Omniglot alphabets to be mapped.
Returns:
A list of ints, corresponding to alphabet ids for the given names. All ids
are between 0 and 49 inclusive.
"""
_, info = tfds.load(name='omniglot', split=tfds.Split.ALL, with_info=True)
alphabet_ids = [
info.features['alphabet'].str2int(alphabet_name)
for alphabet_name in alphabet_names
]
return alphabet_ids
def get_omniglot(alphabet_id, size=None):
"""Loads one of the Omniglot alphabets.
Args:
alphabet_id: (int) alphabet id, must be between 0 and 49 inclusive.
size: either None or a pair of ints. If set to None, the images will not
be resized, and will retain their original size of 105x105. If set to
a pair of ints, then the images will be resized to this size.
Returns:
A pair of:
- a dictionary with keys: 'train', 'validation' and 'test'. Values for
these keys are `tf.data.Dataset`s for train, validation and test,
respectively.
- an integer denoting the number of classes in the dataset
"""
assert 0 <= alphabet_id < 50
np.random.seed(seed=alphabet_id)
pred = lambda datapoint: tf.math.equal(datapoint['alphabet'], alphabet_id)
# The `as_dataset_kwargs` argument makes this function deterministic.
dataset = tfds.load(
name='omniglot',
split=tfds.Split.ALL,
as_dataset_kwargs={'shuffle_files': False})
dataset = dataset.filter(pred)
dataset = dataset.map(format_omniglot)
dataset = dataset.map(normalize)
dataset = dataset.map(convert_to_grayscale)
# Flip to make the background consist of 0's and characters consist of 1's
# (instead of the other way around).
dataset = dataset.map(make_negative)
if size:
dataset = dataset.map(resize(size))
all_datapoints = get_all_datapoints(dataset)
num_classes = max([datapoint['label'] for datapoint in all_datapoints]) + 1
data = {data_split: [] for data_split in _ALL_SPLITS}
for data_group in group_by_label(all_datapoints, num_classes):
group_size = len(data_group)
# Make sure that the datapoints can be divided evenly.
assert group_size % 10 == 0
np.random.shuffle(data_group)
train_size = int(0.5 * group_size)
validation_size = int(0.2 * group_size)
data['train'] += data_group[:train_size]
data['validation'] += data_group[train_size:train_size+validation_size]
data['test'] += data_group[train_size+validation_size:]
train_size = len(data['train'])
for split in _ALL_SPLITS:
data[split] = convert_list_to_dataset(data[split])
# Ensure that the order of training data is different in every epoch.
data['train'] = data['train'].shuffle(buffer_size=train_size)
return data, num_classes
def get_data_for_multitask_omniglot_setup(num_alphabets):
"""Loads a given number of Omniglot datasets for multitask learning.
Args:
num_alphabets: (int) number of alphabets to use, must be between 1 and 50
inclusive.
Returns:
A pair of two lists (`task_data`, `num_classes_for_tasks`), each
containing one element per alphabet. These lists respectively contain
task input data and number of classification classes, as returned from
`get_omniglot`.
"""
alphabet_names = get_omniglot_order()[:num_alphabets]
alphabet_ids = map_omniglot_alphabet_names_to_ids(alphabet_names)
alphabets = [get_omniglot(alphabet_id) for alphabet_id in alphabet_ids]
# Convert a list of pairs into a pair of lists and return
return [list(tup) for tup in zip(*alphabets)]
def get_cifar100_formatter(fine_labels):
"""Formats a CIFAR-100 input into a standard format."""
def format_cifar100(data):
"""Formats a CIFAR-100 input into a standard format.
The formatted sample will have two keys: 'image', containing the input
image, and 'label' containing the label.
Args:
data: dict, a sample from the CIFAR-100 dataset. Contains keys: 'image',
'coarse_label' and 'label'.
Returns:
Formatted `data` dict.
"""
del data['coarse_label']
label = data['label']
data['label'] = -1
for i in range(len(fine_labels)):
if tf.math.equal(label, fine_labels[i]):
data['label'] = i
return data
return format_cifar100
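# Worked example (illustration only): if fine_labels were [3, 42, 43, 88, 97],
# format_cifar100 would remap original fine label 43 to 2 and 97 to 4, so each
# coarse-label task uses contiguous labels 0..4.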
def augment_with_random_crop(data, size=32):
"""Applies the "resize and crop" image augmentation.
Args:
data: dict, a sample from the dataset of size [32, 32, 3].
Contains keys: 'image' and 'label'.
size: (int) image size.
Returns:
The same dict, but after applying the image augmentation.
"""
x = data['image']
x = tf.image.resize_with_crop_or_pad(x, size + 8, size + 8)
x = tf.image.random_crop(x, [size, size, 3])
data['image'] = x
return data
def format_omniglot(data):
"""Formats an Omniglot input into a standard format.
The formatted sample will have two keys: 'image', containing the input image,
and 'label' containing the label.
Args:
data: dict, a sample from the Omniglot dataset. Contains keys: 'image',
'alphabet' and 'alphabet_char_id'.
Returns:
Formatted `data` dict.
"""
data['label'] = data['alphabet_char_id']
del data['alphabet_char_id']
del data['alphabet']
return data
def normalize(data):
data['image'] = tf.to_float(data['image']) / 255.
return data
def make_negative(data):
data['image'] = 1. - data['image']
return data
def rotate90(data):
data['image'] = tf.image.rot90(data['image'])
return data
def resize(size):
def resize_fn(data):
data['image'] = tf.image.resize_images(data['image'], size)
return data
return resize_fn
def convert_to_grayscale(data):
data['image'] = tf.image.rgb_to_grayscale(data['image'])
return data
def convert_to_rgb(data):
data['image'] = tf.image.grayscale_to_rgb(data['image'])
return data
def batch_all(dataset, batch_size):
"""Batches all splits in a dataset into batches of size `batch_size`."""
return {
key: dataset[key].batch(batch_size)
for key in dataset.keys()
}
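# End-to-end sketch of how these helpers compose (the alphabet id and batch
# size below are placeholders; they are not values used by this module):
#
#   datasets, num_classes = get_omniglot(0)
#   batched = batch_all(datasets, batch_size=32)
#   # batched['train'] / ['validation'] / ['test'] yield dicts with 'image'
#   # and 'label' keys, ready to feed into a training loop.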
| apache-2.0 | -2,702,922,259,381,244,400 | 28.577338 | 80 | 0.679599 | false |
joowani/dtags | dtags/commands/untag.py | 1 | 2801 | import sys
from pathlib import Path
from typing import List, Optional, Set, Tuple
from dtags import style
from dtags.commons import (
dtags_command,
get_argparser,
normalize_dirs,
normalize_tags,
prompt_user,
)
from dtags.files import load_config_file, save_config_file
USAGE = "untag [-y] [DIR ...] [-t TAG [TAG ...]]"
DESCRIPTION = f"""
Untag directories.
Tag names are automatically slugified (e.g. "foo bar" to "foo-bar").
If no tags are specified, all tags are removed.
If no directories are specified, tags are removed from all directories.
examples:
# remove tags "app" and "work" from directories ~/foo and ~/bar
{style.command("untag ~/foo ~/bar -t app work")}
# remove all tags from directory ~/foo
{style.command("untag ~/foo")}
# remove tag "work" from all directories
{style.command("untag -t work")}
# skip confirmation prompts with -y/--yes
{style.command("untag -y ~/foo -t work app")}
"""
@dtags_command
def execute(args: Optional[List[str]] = None) -> None:
parser = get_argparser(prog="untag", desc=DESCRIPTION, usage=USAGE)
parser.add_argument(
"dirs",
metavar="DIR",
nargs="*",
help="directories or tags",
)
parser.add_argument(
"-y",
"--yes",
action="store_true",
dest="yes",
help="assume yes to prompts",
)
parser.add_argument(
"-t",
dest="tags",
metavar="TAG",
nargs="+",
help="tag names",
)
parsed_args = parser.parse_args(sys.argv[1:] if args is None else args)
if not parsed_args.dirs and not parsed_args.tags:
parser.error("one of the following arguments are required: DIR, -t")
else:
untag_directories(
dirs=parsed_args.dirs,
tags=parsed_args.tags,
skip_prompts=parsed_args.yes,
)
def untag_directories(
dirs: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
skip_prompts: bool = True,
) -> None:
config = load_config_file()
tag_config = config["tags"]
norm_dirs = normalize_dirs(dirs) if dirs else tag_config.keys()
norm_tags = normalize_tags(tags)
diffs: List[Tuple[Path, Set[str]]] = []
for dirpath in sorted(norm_dirs):
cur_tags = tag_config.get(dirpath, set())
del_tags = cur_tags.intersection(norm_tags) if norm_tags else cur_tags
if del_tags:
diffs.append((dirpath, del_tags))
tag_config[dirpath] = cur_tags - del_tags
if not diffs:
print("Nothing to do")
else:
for dirpath, del_tags in diffs:
print(style.diff(dirpath, del_tags=del_tags))
if skip_prompts or prompt_user():
save_config_file(config)
print("Tags removed successfully")
| mit | 7,909,547,519,584,736,000 | 26.194175 | 78 | 0.612281 | false |
asherkhb/coge | web/gobe/gobe.py | 1 | 7785 | #!/usr/bin/python
import web
import os
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3
import simplejson
import urllib
TMPDIR = "../tmp/GEvo/"
if not os.path.exists(TMPDIR):
TMPDIR = os.path.join(os.path.dirname(__file__), TMPDIR)
DBTMPL = os.path.join(TMPDIR, "%s.sqlite")
def getdb(dbname):
db = sqlite3.connect(DBTMPL % dbname)
db.row_factory = sqlite3.Row
return db
class info(object):
def GET(self, dbname):
web.header('Content-type', 'text/javascript')
db = getdb(dbname)
c = db.cursor()
c2 = db.cursor()
c.execute("SELECT * FROM image_info order by display_id")
c2.execute("SELECT min(xmin) as min, max(xmax) as max, image_id FROM image_data WHERE type='anchor' GROUP BY image_id ORDER BY image_id")
result = {}
for i, (row, anchor) in enumerate(zip(c, c2)):
result[row['iname']] = dict(
title=row['title'],
i=i,
img_width=row['px_width'],
bpmin=row['bpmin'],
bpmax=row['bpmax'],
idx=row['id'],
xmin=anchor['min'],
xmax=anchor['max']
)
return simplejson.dumps(result)
class follow(object):
def GET(self, dbname):
web.header('content-type', 'text/javascript')
db = getdb(dbname)
c = db.cursor()
c2 = db.cursor()
img = web.input(img=None).img
bbox = map(float, web.input().bbox.split(","))
ids = []
pair_ids = []
used_pairs = []
def get_pair_data(pair_id):
c.execute("""SELECT xmin, xmax, ymin, ymax, image_id, image_track FROM image_data WHERE id = ?""", (pair_id,))
p = c.fetchone()
return dict(
pair_id=pair_id,
pair_image_id=p['image_id'],
pair_track=p['image_track'],
pair_bbox=(p['xmin'], p['ymin'], p['xmax'], p['ymax']))
def get_pairs(img_id, bbox):
c.execute("""SELECT id, pair_id, image_id, xmin, xmax, ymin, ymax FROM image_data WHERE ? + 1 > xmin AND ? - 1 < xmax AND
? - 1 > ymin AND ? + 1 < ymax AND image_id = ? AND pair_id != -99 AND type = 'HSP'""", \
(bbox[2], bbox[0], bbox[3], bbox[1], img_id))
results = c.fetchall()
if not results: return None
pairs = []
for r in results:
d = dict(id=r['id'], bbox=(r['xmin'], r['ymin'], r['xmax'], r['ymax']), image_id=r['image_id'])
d.update(get_pair_data(r['pair_id']))
pairs.append(d)
return pairs
def get_pairs_for_bbox_image(xmin, xmax, img_id, exclude_track):
c.execute("""SELECT id, pair_id, image_id, xmin, xmax, ymin, ymax
FROM image_data WHERE ? + 1 > xmin AND ? - 1 < xmax AND
image_id = ? AND pair_id != -99 AND image_track != ? AND type = 'HSP'""", \
(xmax, xmin, img_id, exclude_track))
web.debug("""SELECT id, pair_id, image_id, xmin, xmax, ymin, ymax
FROM image_data WHERE ? + 1 > xmin AND ? - 1 < xmax AND
image_id = ? AND pair_id != -99 AND image_track != ? AND type = 'HSP'""")
web.debug((xmax, xmin, img_id, exclude_track))
results = c.fetchall()
pairs = []
for r in results:
d = dict(id=r['id'], bbox=(r['xmin'], r['ymin'], r['xmax'], r['ymax']), image_id=r['image_id'])
d.update(get_pair_data(r['pair_id']))
pairs.append(d)
return pairs
        pairs = get_pairs(img, bbox) or []
i = 0
while True:
L = len(pairs)
if i == L: break
pair = pairs[i]
            new_pairs = get_pairs(pair['pair_image_id'], pair['pair_bbox']) or []
            for np in (_np for _np in new_pairs if not _np in pairs):
                new2 = get_pairs_for_bbox_image(np['bbox'][0], np['bbox'][2], np['image_id'], np['pair_track'])
                if new2 == []: continue
                # extend with the individual pair dicts rather than appending the list itself
                pairs.extend([p2 for p2 in new2 if p2 not in pairs])
pairs.extend([np for np in new_pairs if not np in pairs])
i += 1
if L == len(pairs): break
web.debug(pairs)
class query(object):
def GET(self, dbname):
db = getdb(dbname)
c = db.cursor()
img = web.input(img=None).img
if web.input(bbox=None).bbox:
bbox = map(float, web.input().bbox.split(","))
c.execute("""SELECT * FROM image_data WHERE ? + 1 > xmin AND ? - 1 < xmax AND
? - 1 > ymin AND ? + 1 < ymax AND image_id = ? AND pair_id != -99 AND type = 'HSP'""", \
(bbox[2], bbox[0], bbox[3], bbox[1], img))
elif web.input(all=None).all:
c.execute("""SELECT distinct(image_track) as image_track FROM image_data WHERE ?
BETWEEN ymin AND ymax AND image_id = ? ORDER BY
ABS(image_track) DESC""", (float(web.input().y), img))
track = c.fetchone()['image_track']
web.debug(track)
c.execute("""SELECT id, xmin, xmax, ymin, ymax, image_id, image_track, pair_id, color, link FROM image_data
WHERE ( (image_track = ?) or (image_track = (? * -1) ) )
and image_id = ? and pair_id != -99 and type = 'HSP'""", (track, track, img))
else: # point query.
x = float(web.input().x)
y = float(web.input().y)
c.execute("""SELECT * FROM image_data WHERE ? + 3 > xmin AND ? - 3
< xmax AND ? BETWEEN ymin and ymax and image_id = ?""",
(x, x, y, img))
c2 = db.cursor()
# now iterate over the cursor
results = []
for result in c:
c2.execute("""SELECT id, xmin, xmax, ymin, ymax, image_id,
image_track, pair_id, color FROM image_data where
id = ?""", (result['pair_id'], ));
pair = c2.fetchone()
try:
anno = result['annotation']
if anno.startswith('http'):
anno = urllib.urlopen(anno).read()
except:
anno = ""
f1pts = []
f2pts = []
for k in ('xmin', 'ymin', 'xmax', 'ymax'):
f1pts.append(int(round(result[k])))
if pair:
f2pts.append(int(round(pair[k])))
f1pts.extend([result['id'], result['image_track']])
if pair:
f2pts.extend([pair['id'], pair['image_track']])
results.append(dict(
# TODO: tell eric to add 'CoGe' to the start of his links.
link=result['link'],
annotation = anno,
# TODO has_pair
has_pair= bool(pair),
color=(result['color'] or (pair and pair['color'])).replace('#', '0x'),
features={
'key%i' % result['image_id']: f1pts,
'key%i' % (pair and pair['image_id'] or 999): f2pts}
))
web.header('Content-type', 'text/javascript')
return simplejson.dumps({'resultset':results})
urls = (
# the first pattern is always the sqlite db name. e.g.: /GEVo_WxUonWBr/info
'/([^\/]+)/info/', 'info',
'/([^\/]+)/follow/', 'follow',
'/([^\/]+)/query/', 'query',
)
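# Illustrative requests against these endpoints (the db name comes from a GEvo
# run and is only an example here):
#   GET /GEvo_WxUonWBr/info/                          -> per-image metadata
#   GET /GEvo_WxUonWBr/query/?img=1&x=100.5&y=50.5    -> point query
#   GET /GEvo_WxUonWBr/query/?img=1&bbox=10,20,110,80 -> box query
#   GET /GEvo_WxUonWBr/query/?img=1&all=1&y=50.5      -> whole-track query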
app = web.application(urls, locals())
application = app.wsgifunc()
if __name__ == "__main__":
app.run()
| bsd-2-clause | 5,173,759,947,039,488,000 | 37.925 | 145 | 0.479897 | false |
mjasher/gac | GAC/flopy/modpath/mpbas.py | 1 | 6164 | """
mpbas module. Contains the ModpathBas class. Note that the user can access
the ModpathBas class as `flopy.modflow.ModpathBas`.
Additional information for this MODFLOW/MODPATH package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?bas6.htm>`_.
"""
import numpy as np
from numpy import empty, array
from flopy.mbase import Package
from flopy.utils import util_2d, util_3d
class ModpathBas(Package):
"""
MODPATH Basic Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modpath.mp.Modpath`) to which
this package will be added.
hnoflo : float
Head value assigned to inactive cells (default is -9999.).
hdry : float
Head value assigned to dry cells (default is -8888.).
def_face_ct : int
Number fo default iface codes to read (default is 0).
bud_label : str or list of strs
MODFLOW budget item to which a default iface is assigned.
def_iface : int or list of ints
Cell face (iface) on which to assign flows from MODFLOW budget file.
laytyp : int or list of ints
MODFLOW layer type (0 is convertible, 1 is confined).
ibound : array of ints, optional
The ibound array (the default is 1).
    prsity : array of floats, optional
        The porosity array (the default is 0.30).
    prsityCB : array of floats, optional
The porosity array for confining beds (the default is 0.30).
extension : str, optional
File extension (default is 'mpbas').
Attributes
----------
heading : str
Text string written to top of package input file.
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.modpath.Modpath()
>>> mpbas = flopy.modpath.ModpathBas(m)
"""
def __init__(self, model, hnoflo=-9999., hdry=-8888.,
def_face_ct=0, bud_label=None, def_iface=None,
laytyp=0, ibound=1, prsity=0.30, prsityCB=0.30,
extension='mpbas', unitnumber = 86):
"""
Package constructor.
"""
Package.__init__(self, model, extension, 'MPBAS', unitnumber)
nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
self.parent.mf.get_name_file_entries()
self.heading1 = '# MPBAS for Modpath, generated by Flopy.'
self.heading2 = '#'
self.hnoflo = hnoflo
self.hdry = hdry
self.def_face_ct = def_face_ct
self.bud_label = bud_label
self.def_iface = def_iface
self.laytyp = laytyp
self.__ibound = util_3d(model, (nlay, nrow, ncol), np.int, ibound,
name='ibound', locat=self.unit_number[0])
self.prsity = prsity
self.prsityCB = prsityCB
self.prsity = util_3d(model,(nlay,nrow,ncol),np.float32,\
prsity,name='prsity',locat=self.unit_number[0])
self.prsityCB = util_3d(model,(nlay,nrow,ncol),np.float32,\
prsityCB,name='prsityCB',locat=self.unit_number[0])
self.parent.add_package(self)
def getibound(self):
"""
Return the ibound array.
Returns
-------
ibound : numpy.ndarray (nlay, nrow, ncol)
ibound object.
"""
return self.__ibound.array
def setibound(self, ibound):
"""
Set the ibound array.
"""
model = self.parent
nrow, ncol, nlay, nper = model.nrow_ncol_nlay_nper
self.__ibound = util_3d(model, (nlay, nrow, ncol), np.int, ibound,
name='ibound', locat=self.unit_number[0])
return
ibound = property(getibound, setibound)
def write_file(self):
"""
Write the package input file.
"""
nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
ModflowDis = self.parent.mf.get_package('DIS')
# Open file for writing
f_bas = open(self.fn_path, 'w')
f_bas.write('#{0:s}\n#{1:s}\n'.format(self.heading1,self.heading2))
f_bas.write('{0:16.6f} {1:16.6f}\n'\
.format(self.hnoflo, self.hdry))
f_bas.write('{0:4d}\n'\
.format(self.def_face_ct))
if self.def_face_ct > 0:
for i in range(self.def_face_ct):
f_bas.write('{0:20s}\n'.format(self.bud_label[i]))
f_bas.write('{0:2d}\n'.format(self.def_iface[i]))
#f_bas.write('\n')
flow_package = self.parent.mf.get_package('BCF6')
if (flow_package != None):
lc = util_2d(self.parent,(nlay,),np.int,\
flow_package.laycon.get_value(),name='bas - laytype',\
locat=self.unit_number[0])
else:
flow_package = self.parent.mf.get_package('LPF')
if (flow_package != None):
lc = util_2d(self.parent,(nlay,),\
np.int,flow_package.laytyp.get_value(),\
name='bas - laytype',locat=self.unit_number[0])
else:
flow_package = self.parent.mf.get_package('UPW')
if (flow_package != None):
lc = util_2d(self.parent,(nlay,),\
np.int,flow_package.laytyp.get_value(),\
name='bas - laytype', locat=self.unit_number[0])
# need to reset lc fmtin
lc.set_fmtin('(40I2)')
f_bas.write(lc.string)
# from modpath bas--uses keyword array types
f_bas.write(self.__ibound.get_file_entry())
# from MT3D bas--uses integer array types
#f_bas.write(self.ibound.get_file_entry())
f_bas.write(self.prsity.get_file_entry())
f_bas.write(self.prsityCB.get_file_entry())
f_bas.close() | gpl-2.0 | 3,510,775,014,214,753,300 | 34.922156 | 91 | 0.540234 | false |
mumrah/kafka-python | kafka/protocol/admin.py | 1 | 23889 | from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Array, Boolean, Bytes, Int8, Int16, Int32, Int64, Schema, String
class ApiVersionResponse_v0(Response):
API_KEY = 18
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('api_versions', Array(
('api_key', Int16),
('min_version', Int16),
('max_version', Int16)))
)
class ApiVersionResponse_v1(Response):
API_KEY = 18
API_VERSION = 1
SCHEMA = Schema(
('error_code', Int16),
('api_versions', Array(
('api_key', Int16),
('min_version', Int16),
('max_version', Int16))),
('throttle_time_ms', Int32)
)
class ApiVersionResponse_v2(Response):
API_KEY = 18
API_VERSION = 2
SCHEMA = ApiVersionResponse_v1.SCHEMA
class ApiVersionRequest_v0(Request):
API_KEY = 18
API_VERSION = 0
RESPONSE_TYPE = ApiVersionResponse_v0
SCHEMA = Schema()
class ApiVersionRequest_v1(Request):
API_KEY = 18
API_VERSION = 1
RESPONSE_TYPE = ApiVersionResponse_v1
SCHEMA = ApiVersionRequest_v0.SCHEMA
class ApiVersionRequest_v2(Request):
API_KEY = 18
API_VERSION = 2
RESPONSE_TYPE = ApiVersionResponse_v1
SCHEMA = ApiVersionRequest_v0.SCHEMA
ApiVersionRequest = [
ApiVersionRequest_v0, ApiVersionRequest_v1, ApiVersionRequest_v2,
]
ApiVersionResponse = [
ApiVersionResponse_v0, ApiVersionResponse_v1, ApiVersionResponse_v2,
]
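# Usage note (not part of the protocol definitions themselves): each Request
# subclass is instantiated with values matching its SCHEMA fields and is
# serialized by the client machinery, e.g. a version probe is simply
# ApiVersionRequest_v0() because its schema is empty, while something like
# DeleteTopicsRequest_v1(topics=['my-topic'], timeout=30000) fills the schema
# by keyword. The topic name and timeout here are placeholders.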
class CreateTopicsResponse_v0(Response):
API_KEY = 19
API_VERSION = 0
SCHEMA = Schema(
('topic_errors', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class CreateTopicsResponse_v1(Response):
API_KEY = 19
API_VERSION = 1
SCHEMA = Schema(
('topic_errors', Array(
('topic', String('utf-8')),
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreateTopicsResponse_v2(Response):
API_KEY = 19
API_VERSION = 2
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topic_errors', Array(
('topic', String('utf-8')),
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreateTopicsResponse_v3(Response):
API_KEY = 19
API_VERSION = 3
SCHEMA = CreateTopicsResponse_v2.SCHEMA
class CreateTopicsRequest_v0(Request):
API_KEY = 19
API_VERSION = 0
RESPONSE_TYPE = CreateTopicsResponse_v0
SCHEMA = Schema(
('create_topic_requests', Array(
('topic', String('utf-8')),
('num_partitions', Int32),
('replication_factor', Int16),
('replica_assignment', Array(
('partition_id', Int32),
('replicas', Array(Int32)))),
('configs', Array(
('config_key', String('utf-8')),
('config_value', String('utf-8')))))),
('timeout', Int32)
)
class CreateTopicsRequest_v1(Request):
API_KEY = 19
API_VERSION = 1
RESPONSE_TYPE = CreateTopicsResponse_v1
SCHEMA = Schema(
('create_topic_requests', Array(
('topic', String('utf-8')),
('num_partitions', Int32),
('replication_factor', Int16),
('replica_assignment', Array(
('partition_id', Int32),
('replicas', Array(Int32)))),
('configs', Array(
('config_key', String('utf-8')),
('config_value', String('utf-8')))))),
('timeout', Int32),
('validate_only', Boolean)
)
class CreateTopicsRequest_v2(Request):
API_KEY = 19
API_VERSION = 2
RESPONSE_TYPE = CreateTopicsResponse_v2
SCHEMA = CreateTopicsRequest_v1.SCHEMA
class CreateTopicsRequest_v3(Request):
API_KEY = 19
API_VERSION = 3
RESPONSE_TYPE = CreateTopicsResponse_v3
SCHEMA = CreateTopicsRequest_v1.SCHEMA
CreateTopicsRequest = [
CreateTopicsRequest_v0, CreateTopicsRequest_v1,
CreateTopicsRequest_v2, CreateTopicsRequest_v3,
]
CreateTopicsResponse = [
CreateTopicsResponse_v0, CreateTopicsResponse_v1,
CreateTopicsResponse_v2, CreateTopicsResponse_v3,
]
class DeleteTopicsResponse_v0(Response):
API_KEY = 20
API_VERSION = 0
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class DeleteTopicsResponse_v1(Response):
API_KEY = 20
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class DeleteTopicsResponse_v2(Response):
API_KEY = 20
API_VERSION = 2
SCHEMA = DeleteTopicsResponse_v1.SCHEMA
class DeleteTopicsResponse_v3(Response):
API_KEY = 20
API_VERSION = 3
SCHEMA = DeleteTopicsResponse_v1.SCHEMA
class DeleteTopicsRequest_v0(Request):
API_KEY = 20
API_VERSION = 0
RESPONSE_TYPE = DeleteTopicsResponse_v0
SCHEMA = Schema(
('topics', Array(String('utf-8'))),
('timeout', Int32)
)
class DeleteTopicsRequest_v1(Request):
API_KEY = 20
API_VERSION = 1
RESPONSE_TYPE = DeleteTopicsResponse_v1
SCHEMA = DeleteTopicsRequest_v0.SCHEMA
class DeleteTopicsRequest_v2(Request):
API_KEY = 20
API_VERSION = 2
RESPONSE_TYPE = DeleteTopicsResponse_v2
SCHEMA = DeleteTopicsRequest_v0.SCHEMA
class DeleteTopicsRequest_v3(Request):
API_KEY = 20
API_VERSION = 3
RESPONSE_TYPE = DeleteTopicsResponse_v3
SCHEMA = DeleteTopicsRequest_v0.SCHEMA
DeleteTopicsRequest = [
DeleteTopicsRequest_v0, DeleteTopicsRequest_v1,
DeleteTopicsRequest_v2, DeleteTopicsRequest_v3,
]
DeleteTopicsResponse = [
DeleteTopicsResponse_v0, DeleteTopicsResponse_v1,
DeleteTopicsResponse_v2, DeleteTopicsResponse_v3,
]
class ListGroupsResponse_v0(Response):
API_KEY = 16
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsResponse_v1(Response):
API_KEY = 16
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsResponse_v2(Response):
API_KEY = 16
API_VERSION = 2
SCHEMA = ListGroupsResponse_v1.SCHEMA
class ListGroupsRequest_v0(Request):
API_KEY = 16
API_VERSION = 0
RESPONSE_TYPE = ListGroupsResponse_v0
SCHEMA = Schema()
class ListGroupsRequest_v1(Request):
API_KEY = 16
API_VERSION = 1
RESPONSE_TYPE = ListGroupsResponse_v1
SCHEMA = ListGroupsRequest_v0.SCHEMA
class ListGroupsRequest_v2(Request):
API_KEY = 16
    API_VERSION = 2
RESPONSE_TYPE = ListGroupsResponse_v2
SCHEMA = ListGroupsRequest_v0.SCHEMA
ListGroupsRequest = [
ListGroupsRequest_v0, ListGroupsRequest_v1,
ListGroupsRequest_v2,
]
ListGroupsResponse = [
ListGroupsResponse_v0, ListGroupsResponse_v1,
ListGroupsResponse_v2,
]
class DescribeGroupsResponse_v0(Response):
API_KEY = 15
API_VERSION = 0
SCHEMA = Schema(
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsResponse_v1(Response):
API_KEY = 15
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsResponse_v2(Response):
API_KEY = 15
API_VERSION = 2
SCHEMA = DescribeGroupsResponse_v1.SCHEMA
class DescribeGroupsResponse_v3(Response):
API_KEY = 15
API_VERSION = 3
SCHEMA = Schema(
('throttle_time_ms', Int32),
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))),
('authorized_operations', Int32))
)
class DescribeGroupsRequest_v0(Request):
API_KEY = 15
API_VERSION = 0
RESPONSE_TYPE = DescribeGroupsResponse_v0
SCHEMA = Schema(
('groups', Array(String('utf-8')))
)
class DescribeGroupsRequest_v1(Request):
API_KEY = 15
API_VERSION = 1
RESPONSE_TYPE = DescribeGroupsResponse_v1
SCHEMA = DescribeGroupsRequest_v0.SCHEMA
class DescribeGroupsRequest_v2(Request):
API_KEY = 15
API_VERSION = 2
RESPONSE_TYPE = DescribeGroupsResponse_v2
SCHEMA = DescribeGroupsRequest_v0.SCHEMA
class DescribeGroupsRequest_v3(Request):
API_KEY = 15
API_VERSION = 3
    RESPONSE_TYPE = DescribeGroupsResponse_v3
SCHEMA = Schema(
('groups', Array(String('utf-8'))),
('include_authorized_operations', Boolean)
)
DescribeGroupsRequest = [
DescribeGroupsRequest_v0, DescribeGroupsRequest_v1,
DescribeGroupsRequest_v2, DescribeGroupsRequest_v3,
]
DescribeGroupsResponse = [
DescribeGroupsResponse_v0, DescribeGroupsResponse_v1,
DescribeGroupsResponse_v2, DescribeGroupsResponse_v3,
]
class SaslHandShakeResponse_v0(Response):
API_KEY = 17
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('enabled_mechanisms', Array(String('utf-8')))
)
class SaslHandShakeResponse_v1(Response):
API_KEY = 17
API_VERSION = 1
SCHEMA = SaslHandShakeResponse_v0.SCHEMA
class SaslHandShakeRequest_v0(Request):
API_KEY = 17
API_VERSION = 0
RESPONSE_TYPE = SaslHandShakeResponse_v0
SCHEMA = Schema(
('mechanism', String('utf-8'))
)
class SaslHandShakeRequest_v1(Request):
API_KEY = 17
API_VERSION = 1
RESPONSE_TYPE = SaslHandShakeResponse_v1
SCHEMA = SaslHandShakeRequest_v0.SCHEMA
SaslHandShakeRequest = [SaslHandShakeRequest_v0, SaslHandShakeRequest_v1]
SaslHandShakeResponse = [SaslHandShakeResponse_v0, SaslHandShakeResponse_v1]
class DescribeAclsResponse_v0(Response):
API_KEY = 29
API_VERSION = 0
SCHEMA = Schema(
('throttle_time_ms', Int32),
('error_code', Int16),
('error_message', String('utf-8')),
('resources', Array(
('resource_type', Int8),
('resource_name', String('utf-8')),
('acls', Array(
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)))))
)
class DescribeAclsResponse_v1(Response):
API_KEY = 29
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('error_code', Int16),
('error_message', String('utf-8')),
('resources', Array(
('resource_type', Int8),
('resource_name', String('utf-8')),
('resource_pattern_type', Int8),
('acls', Array(
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)))))
)
class DescribeAclsRequest_v0(Request):
API_KEY = 29
API_VERSION = 0
RESPONSE_TYPE = DescribeAclsResponse_v0
SCHEMA = Schema(
('resource_type', Int8),
('resource_name', String('utf-8')),
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)
)
class DescribeAclsRequest_v1(Request):
API_KEY = 29
API_VERSION = 1
RESPONSE_TYPE = DescribeAclsResponse_v1
SCHEMA = Schema(
('resource_type', Int8),
('resource_name', String('utf-8')),
('resource_pattern_type_filter', Int8),
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)
)
DescribeAclsRequest = [DescribeAclsRequest_v0, DescribeAclsRequest_v1]
DescribeAclsResponse = [DescribeAclsResponse_v0, DescribeAclsResponse_v1]
class CreateAclsResponse_v0(Response):
API_KEY = 30
API_VERSION = 0
SCHEMA = Schema(
('throttle_time_ms', Int32),
('creation_responses', Array(
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreateAclsResponse_v1(Response):
API_KEY = 30
API_VERSION = 1
SCHEMA = CreateAclsResponse_v0.SCHEMA
class CreateAclsRequest_v0(Request):
API_KEY = 30
API_VERSION = 0
RESPONSE_TYPE = CreateAclsResponse_v0
SCHEMA = Schema(
('creations', Array(
('resource_type', Int8),
('resource_name', String('utf-8')),
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)))
)
class CreateAclsRequest_v1(Request):
API_KEY = 30
API_VERSION = 1
RESPONSE_TYPE = CreateAclsResponse_v1
SCHEMA = Schema(
('creations', Array(
('resource_type', Int8),
('resource_name', String('utf-8')),
('resource_pattern_type', Int8),
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)))
)
CreateAclsRequest = [CreateAclsRequest_v0, CreateAclsRequest_v1]
CreateAclsResponse = [CreateAclsResponse_v0, CreateAclsResponse_v1]
class DeleteAclsResponse_v0(Response):
API_KEY = 31
API_VERSION = 0
SCHEMA = Schema(
('throttle_time_ms', Int32),
('filter_responses', Array(
('error_code', Int16),
('error_message', String('utf-8')),
('matching_acls', Array(
('error_code', Int16),
('error_message', String('utf-8')),
('resource_type', Int8),
('resource_name', String('utf-8')),
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)))))
)
class DeleteAclsResponse_v1(Response):
API_KEY = 31
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('filter_responses', Array(
('error_code', Int16),
('error_message', String('utf-8')),
('matching_acls', Array(
('error_code', Int16),
('error_message', String('utf-8')),
('resource_type', Int8),
('resource_name', String('utf-8')),
('resource_pattern_type', Int8),
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)))))
)
class DeleteAclsRequest_v0(Request):
API_KEY = 31
API_VERSION = 0
RESPONSE_TYPE = DeleteAclsResponse_v0
SCHEMA = Schema(
('filters', Array(
('resource_type', Int8),
('resource_name', String('utf-8')),
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)))
)
class DeleteAclsRequest_v1(Request):
API_KEY = 31
API_VERSION = 1
RESPONSE_TYPE = DeleteAclsResponse_v1
SCHEMA = Schema(
('filters', Array(
('resource_type', Int8),
('resource_name', String('utf-8')),
('resource_pattern_type_filter', Int8),
('principal', String('utf-8')),
('host', String('utf-8')),
('operation', Int8),
('permission_type', Int8)))
)
DeleteAclsRequest = [DeleteAclsRequest_v0, DeleteAclsRequest_v1]
DeleteAclsResponse = [DeleteAclsResponse_v0, DeleteAclsResponse_v1]
class AlterConfigsResponse_v0(Response):
API_KEY = 33
API_VERSION = 0
SCHEMA = Schema(
('throttle_time_ms', Int32),
('resources', Array(
('error_code', Int16),
('error_message', String('utf-8')),
('resource_type', Int8),
('resource_name', String('utf-8'))))
)
class AlterConfigsResponse_v1(Response):
API_KEY = 33
API_VERSION = 1
SCHEMA = AlterConfigsResponse_v0.SCHEMA
class AlterConfigsRequest_v0(Request):
API_KEY = 33
API_VERSION = 0
RESPONSE_TYPE = AlterConfigsResponse_v0
SCHEMA = Schema(
('resources', Array(
('resource_type', Int8),
('resource_name', String('utf-8')),
('config_entries', Array(
('config_name', String('utf-8')),
('config_value', String('utf-8')))))),
('validate_only', Boolean)
)
class AlterConfigsRequest_v1(Request):
API_KEY = 33
API_VERSION = 1
RESPONSE_TYPE = AlterConfigsResponse_v1
SCHEMA = AlterConfigsRequest_v0.SCHEMA
AlterConfigsRequest = [AlterConfigsRequest_v0, AlterConfigsRequest_v1]
AlterConfigsResponse = [AlterConfigsResponse_v0, AlterConfigsResponse_v1]
class DescribeConfigsResponse_v0(Response):
API_KEY = 32
API_VERSION = 0
SCHEMA = Schema(
('throttle_time_ms', Int32),
('resources', Array(
('error_code', Int16),
('error_message', String('utf-8')),
('resource_type', Int8),
('resource_name', String('utf-8')),
('config_entries', Array(
('config_names', String('utf-8')),
('config_value', String('utf-8')),
('read_only', Boolean),
('is_default', Boolean),
('is_sensitive', Boolean)))))
)
class DescribeConfigsResponse_v1(Response):
API_KEY = 32
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('resources', Array(
('error_code', Int16),
('error_message', String('utf-8')),
('resource_type', Int8),
('resource_name', String('utf-8')),
('config_entries', Array(
('config_names', String('utf-8')),
('config_value', String('utf-8')),
('read_only', Boolean),
('is_default', Boolean),
('is_sensitive', Boolean),
('config_synonyms', Array(
('config_name', String('utf-8')),
('config_value', String('utf-8')),
('config_source', Int8)))))))
)
class DescribeConfigsResponse_v2(Response):
API_KEY = 32
API_VERSION = 2
SCHEMA = Schema(
('throttle_time_ms', Int32),
('resources', Array(
('error_code', Int16),
('error_message', String('utf-8')),
('resource_type', Int8),
('resource_name', String('utf-8')),
('config_entries', Array(
('config_names', String('utf-8')),
('config_value', String('utf-8')),
('read_only', Boolean),
('config_source', Int8),
('is_sensitive', Boolean),
('config_synonyms', Array(
('config_name', String('utf-8')),
('config_value', String('utf-8')),
('config_source', Int8)))))))
)
class DescribeConfigsRequest_v0(Request):
API_KEY = 32
API_VERSION = 0
RESPONSE_TYPE = DescribeConfigsResponse_v0
SCHEMA = Schema(
('resources', Array(
('resource_type', Int8),
('resource_name', String('utf-8')),
('config_names', Array(String('utf-8')))))
)
class DescribeConfigsRequest_v1(Request):
API_KEY = 32
API_VERSION = 1
RESPONSE_TYPE = DescribeConfigsResponse_v1
SCHEMA = Schema(
('resources', Array(
('resource_type', Int8),
('resource_name', String('utf-8')),
('config_names', Array(String('utf-8'))))),
('include_synonyms', Boolean)
)
class DescribeConfigsRequest_v2(Request):
API_KEY = 32
API_VERSION = 2
RESPONSE_TYPE = DescribeConfigsResponse_v2
SCHEMA = DescribeConfigsRequest_v1.SCHEMA
DescribeConfigsRequest = [
DescribeConfigsRequest_v0, DescribeConfigsRequest_v1,
DescribeConfigsRequest_v2,
]
DescribeConfigsResponse = [
DescribeConfigsResponse_v0, DescribeConfigsResponse_v1,
DescribeConfigsResponse_v2,
]
class SaslAuthenticateResponse_v0(Response):
API_KEY = 36
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('error_message', String('utf-8')),
('sasl_auth_bytes', Bytes)
)
class SaslAuthenticateResponse_v1(Response):
API_KEY = 36
API_VERSION = 1
SCHEMA = Schema(
('error_code', Int16),
('error_message', String('utf-8')),
('sasl_auth_bytes', Bytes),
('session_lifetime_ms', Int64)
)
class SaslAuthenticateRequest_v0(Request):
API_KEY = 36
API_VERSION = 0
RESPONSE_TYPE = SaslAuthenticateResponse_v0
SCHEMA = Schema(
('sasl_auth_bytes', Bytes)
)
class SaslAuthenticateRequest_v1(Request):
API_KEY = 36
API_VERSION = 1
RESPONSE_TYPE = SaslAuthenticateResponse_v1
SCHEMA = SaslAuthenticateRequest_v0.SCHEMA
SaslAuthenticateRequest = [
SaslAuthenticateRequest_v0, SaslAuthenticateRequest_v1,
]
SaslAuthenticateResponse = [
SaslAuthenticateResponse_v0, SaslAuthenticateResponse_v1,
]
class CreatePartitionsResponse_v0(Response):
API_KEY = 37
API_VERSION = 0
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topic_errors', Array(
('topic', String('utf-8')),
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreatePartitionsResponse_v1(Response):
API_KEY = 37
API_VERSION = 1
SCHEMA = CreatePartitionsResponse_v0.SCHEMA
class CreatePartitionsRequest_v0(Request):
API_KEY = 37
API_VERSION = 0
RESPONSE_TYPE = CreatePartitionsResponse_v0
SCHEMA = Schema(
('topic_partitions', Array(
('topic', String('utf-8')),
('new_partitions', Schema(
('count', Int32),
('assignment', Array(Array(Int32))))))),
('timeout', Int32),
('validate_only', Boolean)
)
class CreatePartitionsRequest_v1(Request):
API_KEY = 37
API_VERSION = 1
SCHEMA = CreatePartitionsRequest_v0.SCHEMA
RESPONSE_TYPE = CreatePartitionsResponse_v1
CreatePartitionsRequest = [
CreatePartitionsRequest_v0, CreatePartitionsRequest_v1,
]
CreatePartitionsResponse = [
CreatePartitionsResponse_v0, CreatePartitionsResponse_v1,
]
| apache-2.0 | 1,102,737,269,104,524,300 | 26.649306 | 97 | 0.573025 | false |
dattalab/d_code | events/eventRoutines.py | 1 | 23750 | """Event arrays are 2D label arrays (time x ROI) that are generated from an
array of fluorescent traces of the same size.
Uses the following inequality to determine if an event occured at a specific time in a cell:
dF/F of cell > (baseline of cell + std_threshold * std of cell * alpha)
See the findEvents() docstring for more info.
These routines are used to create and analyze event arrays. Note that
some of the event utility functions return masked numpy arrays. This
is because generally, there are different number of events in each
cell during each trial. Anywhere there wasn't an event is a 'np.nan'
value, and the mask will ensure that it isn't used to calcuate things
like mean(), min(), max() etc.
"""
import numpy as np
import traces as tm
from sklearn.mixture import GMM
import scipy.ndimage as nd
import mahotas
__all__ = ['findEvents', 'findEventsGMM', 'findEventsBackground',
'getCounts', 'getStartsAndStops', 'getDurations', 'getAvgAmplitudes', 'getWeightedEvents',
'fitGaussianMixture1D', 'getGMMBaselines']
#----------------------------------------EVENT FINDING FUNCTIONS AND WRAPPERS-----------------------------------
def findEvents(traces, stds, std_threshold=2.5, falling_std_threshold=None, baselines=None, boxWidth=3, minimum_length=2, alpha=None):
"""Core event finding routine with flexible syntax.
Uses the following inequality to determine if an event occured at a specific time in a cell:
dF/F of cell > (baseline of cell + std_threshold * std of cell * alpha)
By default, the baseline is 0.0 (the dF/F traces have been baselined). This baseline can be
explicitly specified using the `baselines` parameter. If `baselines` is a 1d array, it is a
global correction value. If `baselines` is exactly the same size as `traces`, the routine
assumes that the baselines have been explicitly specificed across all cells, trials and frames.
If `baselines` is of size (time x trials), then the routine assumes that the basline value has
been determined for the whole population on a trial by trial basis. This is done in the routines
`findEventsBackground` and `findEventsGMM`.
The `alpha` parameter is here for flexibility. It allows for the scaling of the threshold of detection
on a cell by cell, frame by frame basis indepedent of the noise of a cell or it's baseline value.
If specified it must be the exact same size as `traces`. By default it is set to 1.0.
The routine returns an event array exactly the same size as `traces`, where each event is labeled with
a unique number (an integer). The background is labeled with '0'. This can be used in all the utility
routines below.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:param: alpha - optional scaling parameter for adjusting thresholds
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
time, cells, trials = traces.shape
events = np.zeros_like(traces)
# broadcasting of baselines. ends up as time x cells x trials. this is really annoying,
    # but relying on numpy to broadcast things was tricky and problematic. idea here is to
# get baselines identical to traces
if baselines is None: # no baseline correction, default
full_baselines = np.zeros_like(traces)
    elif baselines.shape == (time,): # one global correction
full_baselines = np.zeros_like(traces)
for trial in range(trials):
for cell in range(cells):
full_baselines[:,cell,trial] = baselines
    elif baselines.shape == (time, cells): # full, but only one trial
full_baselines = baselines[:,:,None]
elif baselines.shape == (time, trials): # modeled on a trial by trial basis
full_baselines = np.zeros_like(traces)
for trial in range(trials):
for cell in range(cells):
full_baselines[:,cell,trial] = baselines[:,trial]
# this is a check to prevent a dip in the global population from calling stuff responders
# basically, if the estimated baseline falls below zero, we fall back to the implicit background
# value of 0.0
full_baselines[full_baselines<0.0] = 0.0
# alpha is a scaling factor for event detection. if used it has to be the same size and shape as traces.
# no broadcasting is done here. it scales the threshold for detection so by default it is 1.0 everywhere.
if alpha is None:
alpha = np.ones_like(full_baselines)
# smooth traces and baselines
    if boxWidth != 0:
        traces_smoothed = nd.convolve1d(traces, np.array([1]*boxWidth)/float(boxWidth), axis=0)
        baselines_smoothed = nd.convolve1d(full_baselines, np.array([1]*boxWidth)/float(boxWidth), axis=0)
    else:
        # no smoothing requested; detection runs on the raw traces and baselines
        traces_smoothed = traces
        baselines_smoothed = full_baselines
# detect events
for trial in range(trials):
for cell in range(cells):
events[:,cell,trial] = traces_smoothed[:,cell,trial] > baselines_smoothed[:,cell,trial] + (stds[cell, trial] * float(std_threshold) * alpha[:,cell,trial])
# filter for minimum length
events = mahotas.label(events, np.array([1,1])[:,np.newaxis,np.newaxis])[0]
for single_event in range(1, events.max()+1):
if (events == single_event).sum() <= minimum_length:
events[events == single_event] = 0
events = events>0
# if a falling std is specified, extend events until they drop below that threshold
if falling_std_threshold is not None:
for trial in range(trials):
for cell in range(cells):
falling_thresh_events = traces_smoothed[:,cell,trial] > baselines_smoothed[:,cell,trial] + (stds[cell, trial] * float(falling_std_threshold) * alpha[:,cell,trial])
for event_end in np.argwhere(np.diff(events[:,cell,trial].astype(int)) == -1):
j = event_end
while (j<time) and ((events[j,cell,trial]) or (falling_thresh_events[j])):
events[j,cell,trial] = events[j-1,cell,trial]
j = j + 1
# finally label the event array and return it.
events = mahotas.label(events>0, np.array([1,1])[:,np.newaxis,np.newaxis])[0]
return np.squeeze(events)
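# Minimal usage sketch (the variable names below are placeholders and the
# simple per-cell std estimate is only one possible choice):
#
#   >>> dff = ...                        # (time x cells x trials) dF/F traces
#   >>> stds = dff.std(axis=0)           # noise estimate per cell and trial
#   >>> events = findEvents(dff, stds, std_threshold=2.5, boxWidth=3)
#   >>> getCounts(events)                # number of events per cell/trial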
def findEventsGMM(traces, stds, std_threshold=2.5, falling_std_threshold=None, boxWidth=3, minimum_length=2):
"""Wrapper for findEvents with baseline estimation using a mixture of gaussians model.
The major idea here is to use a mixture of two gaussians to model
the baselines within each trial as a mixture of two gaussians -
one for the 'baseline' and one for all the 'bright' responding
pixels. At each time point, the ROI brightnesses are fit with
with this GMM. The means of the two distributions are initialized
to the background 'cell' and all points brighter than the mean of
all ROIs. After fitting, the smaller of the two means at every
point is taken to be the 'background'. This generally is very
close to the average of the entire frame, but is generally smaller
during full field events, because the larger gaussian 'sucks up'
the spurious bright pixels.
See getGMMBaselines() for more information.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
baselines = getGMMBaselines(traces) # time x trials (one population baseline trace for all cells)
return findEvents(traces, stds, std_threshold, falling_std_threshold, baselines, boxWidth, minimum_length)
def findEventsBackground(traces, stds, std_threshold=2.5, falling_std_threshold=None, boxWidth=3, minimum_length=2):
"""Wrapper for findEvents with baseline estimation using the background..
Here, we estimate the population baseline for all the cells as the
'background cell', or cell 0. It is generally a fair estimation
of the general response of the field of view, but is imperfect due
to segmentation errors.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
baselines = traces[:,0,:].copy() # time x trials (one population baseline trace for all cells)
return findEvents(traces, stds, std_threshold, falling_std_threshold, baselines, boxWidth, minimum_length)
#----------------------------------------EVENT UTILITY FUNCTIONS-----------------------------------
def getStartsAndStops(event_array):
"""This routine takes an event_array and returns the starting and
stopping times for all events in the array.
:param: event_array - 2d or 3d numpy event array (time x cells, or time x cells x trials))
:returns: masked numpy arrays, one for starting times and stopping times.
size is cells x max event number or cells x trials x max event number.
masked array is to account for the variable number of events in each cell
"""
event_array = np.atleast_3d(event_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
starts = np.zeros((cells, trials, int(max_num_events)))
stops = np.zeros((cells, trials, int(max_num_events)))
starts[:] = np.nan
stops[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
starts[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).flatten()[0]
stops[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).flatten()[-1]
starts = np.ma.array(starts, mask=np.isnan(starts))
starts = np.squeeze(starts)
stops = np.ma.array(stops, mask=np.isnan(stops))
stops = np.squeeze(stops)
return starts, stops
def getCounts(event_array, time_range=None):
"""This routine takes an event_array and optionally a time range
and returns the number of events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 1d or 2d numpy array of counts (cells or cells x trials)
"""
if time_range is not None:
event_array = event_array[time_range[0]:time_range[1],:] # note that this works for 2 or 3d arrays...
if event_array.ndim is 2:
event_array = event_array[:,:,np.newaxis]
time, cells, trials = event_array.shape
counts = np.zeros((cells,trials))
for trial in range(trials):
for cell in range(cells):
counts[cell, trial] = np.unique(event_array[:,cell,trial]).size - 1
return np.squeeze(counts)
def getDurations(event_array, time_range=None):
"""This routine takes an event_array (time x cells) and returns
the duration of events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 2d masked numpy array of event durations. size is cells x largest number of events.
masked entries are to account for variable number of events
"""
event_array = np.atleast_3d(event_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
durations = np.zeros((cells, trials, int(max_num_events)))
durations[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
durations[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).size
durations = np.ma.array(durations, mask=np.isnan(durations))
durations = np.squeeze(durations)
return durations
def getAvgAmplitudes(event_array, trace_array, time_range=None):
"""This routine takes an event_array (time x cells) and
corresponding trace array and returns the average amplitudes of
events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 2d masked numpy array of event average amplitudes. size is cells x largest number of events.
masked entries are account for variable number of events
"""
event_array = np.atleast_3d(event_array)
trace_array= np.atleast_3d(trace_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
amps = np.zeros((cells, trials, int(max_num_events)))
amps[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
amps[cell, trial, i] = trace_array[event_array == event_id].mean()
amps = np.ma.array(amps, mask=np.isnan(amps))
amps = np.squeeze(amps)
return np.ma.masked_array(amps, np.isnan(amps))
def getWeightedEvents(event_array, trace_array):
"""This routine takes an event array and corresponding trace array
and replaces the event labels with the average amplitude of the
event.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: trace_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:returns: 2d numpy array same shape and size of event_array, zero where there
weren't events, and the average event amplitude for the event otherwise.
"""
weighted_events = np.zeros_like(event_array, dtype=float)
for i in np.unique(event_array)[1:]:
weighted_events[event_array==i] = trace_array[event_array==i].mean()
return weighted_events
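# Sketch of summarizing an event array produced by one of the finders above
# ('events' and 'dff' are placeholders for a label array and its dF/F traces):
#
#   >>> starts, stops = getStartsAndStops(events)
#   >>> durations = getDurations(events)          # masked where no event
#   >>> amps = getAvgAmplitudes(events, dff)
#   >>> durations.mean(axis=-1)                   # mask ignores missing events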
#----------------------------------------GMM UTILITY FUNCTIONS-----------------------------------
def fitGaussianMixture1D(data, n=2, set_mean_priors=True):
"""Routine for fitting a 1d array to a mixture of `n` gaussians.
if 'set_mean_priors' is True (the default), we initialize the GMM
model with means equal to the first point (the 'background' cell)
and all ROIs larger than the mean. Otherwise, we have random means.
After fitting, we return the means, stds, and weights of the GMM,
along with the BIC, AIC, and the model itself.
:param: data - 1d array of data to fit
:param: n - number of gaussians to fit, defaults to 2
:param: set_mean_priors - boolean, if true, initializes the means of a mixture of 2 gaussians
:returns: tuple of (means, stds, weights, BIC, AIC, GMM model object)
"""
if set_mean_priors:
g = GMM(n_components=n, init_params='wc', n_init=5)
g.means_ = np.zeros((n, 1))
g.means_[0,0] = data[0] # first datapoint is the background value... should be near 0.0
g.means_[1,0] = data[data > data[0]].mean()
else:
g = GMM(n_components=n, n_init=5)
g.fit(data)
return (np.squeeze(g.means_.flatten()),
np.squeeze(np.sqrt(g.covars_).flatten()),
np.squeeze(g.weights_).flatten(),
g.bic(data),
g.aic(data),
g)
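# Example sketch: fitting one frame of ROI brightnesses with the mixture model
# (the synthetic values below stand in for traces[frame, :, trial]):
#
#   >>> frame_values = np.concatenate([np.zeros(50), 2.0 * np.ones(5)])
#   >>> means, stds, weights, bic, aic, model = fitGaussianMixture1D(frame_values)
#   >>> baseline_estimate = means.min()   # the smaller mean is the 'background'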
def getGMMBaselines(traces):
"""Wrapper for fitGaussianMixture1D() for findEventsGMM().
:param: traces - 2 or 3d numpy array of dF/F (time x cells, or time x cells x trials)
:returns: 1 or 2d numpy array of estimated baseline (time or time x trials).
"""
traces = np.atleast_3d(traces) # time x cells x trials
time, cells, trials = traces.shape
gmmBaselines = np.zeros((time, trials)) # one baseline estimation for each trial
for trial in range(trials):
for frame in range(time):
means, stds, weights, bic, aic, model = fitGaussianMixture1D(traces[frame,:,trial], 2)
gmmBaselines[frame, trial] = means.min()
return gmmBaselines
#----------------------------------------DEPRECATED EVENT FINDING FUNCTIONS-----------------------------------
def findEventsAtThreshold(traces, stds, rising_threshold, falling_threshold=0.75, first_mode='rising', second_mode='falling', boxWidth=3, distance_cutoff=2):
"""----------------DEPRECATED-----------------------------
Routine to find events based on the method in Dombeck et al., 2007.
Relies on the multi-dimensional findLevels function in traceRoutines.
Finds all two sets of points in `traces` that cross threshold multiples
of `stds`. The first_mode and second_mode parameters determine if the
crossings are rising, or falling. The trace is filtered with a flat
kernel of width `boxWidth` and successive crossings are paired. Any
    crossings less than `distance_cutoff` apart are discarded.
This routine is called by findEventsDombeck().
:param: traces - 2 or 3d numpy array of dF/F traces (time x cells, or time x cells x trial)
:param: stds - 1 or 2d numpy array of values representing noise levels in the data (cells, or cells x trials)
:param: rising_threshold - float used for first crossings
:param: falling_threshold - float used for second crossings
:param: boxWidth - filter size
:param: distance_cutoff - eliminate crossings pairs closer than this- eliminates noise
:returns: 2d or 3d array same size and dimension as traces, labeled with event number
"""
# insure that we have at least one 'trial' dimension.
if traces.ndim == 2:
traces = np.atleast_3d(traces)
stds = np.atleast_2d(stds)
time, cells, trials = traces.shape
# normally tm.findLevels works with a single number, but if the shapes are right then it will broadcast correctly with a larger array
first_crossings = tm.findLevelsNd(traces, np.array(stds)*rising_threshold, mode=first_mode, axis=0, boxWidth=boxWidth)
second_crossings = tm.findLevelsNd(traces, np.array(stds)*falling_threshold, mode=second_mode, axis=0, boxWidth=boxWidth)
events = np.zeros_like(traces)
i=1
for cell in range(cells):
for trial in range(trials):
rising_event_locations = np.where(first_crossings[:,cell,trial])[0] # peel off the tuple
falling_event_locations = np.where(second_crossings[:,cell,trial])[0] # peel off the tuple
possible_pairs = []
for r in rising_event_locations:
if possible_pairs:
prev_rising = zip(*possible_pairs)[0]
prev_falling = zip(*possible_pairs)[1]
if r <= prev_falling[-1]:
continue
try:
f = falling_event_locations[np.searchsorted(falling_event_locations, r)]
possible_pairs.append([r,f])
except IndexError:
possible_pairs.append([r,time])
for pair in possible_pairs:
if pair[1]-pair[0] > distance_cutoff:
events[pair[0]:pair[1], cell, trial] = i
i = i+1
return np.squeeze(events)
def findEventsDombeck(traces, stds, false_positive_rate=0.05, lower_sigma=1, upper_sigma=5, boxWidth=3, distance_cutoff=2):
"""----------------DEPRECATED-----------------------------
This routine uses findEventsAtThreshold() at a range of thresholds to
    detect both positive and negative going events, and calculates a false positive
rate based on the percentage of total negative events
(see Dombeck et al. 2007). It then calculates the threshold closest to
    the specified false positive rate and returns that event array for
positive going events.
The falling value is hardcoded at 0.75 * std of baseline, as per Dombeck et al. 2007.
:param: traces - 2 or 3d numpy array of traces (time x cells or time x cells x trials)
:param: stds - 1 or 2d numpy array of values representing noise levels in the data (cells, or cells x trials)
:param: false_positive_rate - float value of desired false positive rate (0.05 = 5%)
:param: lower_sigma - starting point for scan
:param: upper_sigma - stopping point for scan
:param: boxWidth - window size for pre-smoothing
:param: distance_cutoff - minimum length of event
:returns: events array for traces at desired false positive rate
"""
all_events = []
for sigma in np.arange(lower_sigma, upper_sigma, 0.125):
pos_events = findEventsAtThreshold(traces, stds, sigma, 0.75, first_mode='rising', second_mode='falling', boxWidth=boxWidth, distance_cutoff=distance_cutoff)
neg_events = findEventsAtThreshold(traces, stds, -sigma, -0.75, first_mode='falling', second_mode='rising', boxWidth=boxWidth, distance_cutoff=distance_cutoff)
        temp_false_positive_rate = float(neg_events.max()) / (pos_events.max() + neg_events.max())
all_events.append((sigma, pos_events.max(), neg_events.max(), temp_false_positive_rate, pos_events, neg_events))
closest_to_false_pos = np.argmin(np.abs(np.array(zip(*all_events)[3])-false_positive_rate)) # get all false positive rates, find index closest to 0.05
print 'Using sigma cutoff of: ' + str(all_events[closest_to_false_pos][0]) # get the right sigma
return all_events[closest_to_false_pos][4] # pos events are 4th in tuple
| mit | 7,388,882,840,149,779,000 | 49.211416 | 179 | 0.666695 | false |
romses/FitView | fitparse/records.py | 1 | 11248 | import math
import struct
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
from numbers import Number
class RecordBase(object):
# namedtuple-like base class. Subclasses should must __slots__
__slots__ = ()
# TODO: switch back to namedtuple, and don't use default arguments as None
# and see if that gives us any performance improvements
def __init__(self, *args, **kwargs):
# WARNING: use of map(None, l1, l2) equivalent to zip_longest in py3k
for slot_name, value in izip_longest(self.__slots__, args):
# map(None, self.__slots__, args):
setattr(self, slot_name, value)
for slot_name, value in kwargs.items():
setattr(self, slot_name, value)
class MessageHeader(RecordBase):
__slots__ = ('is_definition', 'local_mesg_num', 'time_offset')
def __repr__(self):
return '<MessageHeader: %s -- local mesg: #%d%s>' % (
'definition' if self.is_definition else 'data',
self.local_mesg_num,
', time offset: %d' % self.time_offset
if self.time_offset else '', )
class DefinitionMessage(RecordBase):
__slots__ = ('header', 'endian', 'mesg_type', 'mesg_num', 'field_defs')
type = 'definition'
@property
def name(self):
return self.mesg_type.name if self.mesg_type else 'unknown_%d' % self.mesg_num
def __repr__(self):
return '<DefinitionMessage: %s (#%d) -- local mesg: #%d, field defs: [%s]>' % (
self.name,
self.mesg_num,
self.header.local_mesg_num,
', '.join([fd.name for fd in self.field_defs]), )
class FieldDefinition(RecordBase):
__slots__ = ('field', 'def_num', 'base_type', 'size')
@property
def name(self):
return self.field.name if self.field else 'unknown_%d' % self.def_num
@property
def type(self):
return self.field.type if self.field else self.base_type
def __repr__(self):
return '<FieldDefinition: %s (#%d) -- type: %s (%s), size: %d byte%s>' % (
self.name,
self.def_num,
self.type.name,
self.base_type.name,
self.size,
's' if self.size != 1 else '', )
class DataMessage(RecordBase):
__slots__ = ('header', 'def_mesg', 'fields')
type = 'data'
def get(self, field_name, as_dict=False):
# SIMPLIFY: get rid of as_dict
for field_data in self.fields:
if field_data.is_named(field_name):
return field_data.as_dict() if as_dict else field_data
def get_value(self, field_name):
# SIMPLIFY: get rid of this completely
field_data = self.get(field_name)
if field_data:
return field_data.value
def get_values(self):
# SIMPLIFY: get rid of this completely
return dict((f.name if f.name else f.def_num, f.value)
for f in self.fields)
@property
def name(self):
return self.def_mesg.name
@property
def mesg_num(self):
# SIMPLIFY: get rid of this
return self.def_mesg.mesg_num
@property
def mesg_type(self):
# SIMPLIFY: get rid of this
return self.def_mesg.mesg_type
def as_dict(self):
# TODO: rethink this format
return {
'name': self.name,
'fields': [f.as_dict() for f in self.fields],
}
def __iter__(self):
# Sort by whether this is a known field, then its name
return iter(
sorted(
self.fields, key=lambda fd: (int(fd.field is None), fd.name)))
def __repr__(self):
return '<DataMessage: %s (#%d) -- local mesg: #%d, fields: [%s]>' % (
self.name,
self.mesg_num,
self.header.local_mesg_num,
', '.join(
["%s: %s" % (fd.name, fd.value) for fd in self.fields]), )
def __str__(self):
# SIMPLIFY: get rid of this
return '%s (#%d)' % (self.name, self.mesg_num)
class FieldData(RecordBase):
__slots__ = ('field_def', 'field', 'parent_field', 'value', 'raw_value',
'units')
def __init__(self, *args, **kwargs):
        super(FieldData, self).__init__(*args, **kwargs)
if not self.units and self.field:
# Default to units on field, otherwise None.
            # NOTE: Not a property since you may want to override this in a data processor
self.units = self.field.units
@property
def name(self):
return self.field.name if self.field else 'unknown_%d' % self.def_num
# TODO: Some notion of flags
def is_named(self, name):
if self.field:
if name in (self.field.name, self.field.def_num):
return True
if self.parent_field:
if name in (self.parent_field.name, self.parent_field.def_num):
return True
if self.field_def:
if name == self.field_def.def_num:
return True
return False
@property
def def_num(self):
# Prefer to return the def_num on the field
# since field_def may be None if this field is dynamic
return self.field.def_num if self.field else self.field_def.def_num
@property
def base_type(self):
# Try field_def's base type, if it doesn't exist, this is a
        # dynamically added field, so field won't be None
return self.field_def.base_type if self.field_def else self.field.base_type
@property
def is_base_type(self):
return self.field.is_base_type if self.field else True
@property
def type(self):
return self.field.type if self.field else self.base_type
@property
def field_type(self):
return self.field.field_type if self.field else 'field'
def as_dict(self):
return {
'name': self.name,
'def_num': self.def_num,
'base_type': self.base_type.name,
'type': self.type.name,
'units': self.units,
'value': self.value,
'raw_value': self.raw_value,
}
def __repr__(self):
return '<FieldData: %s: %s%s, def num: %d, type: %s (%s), raw value: %s>' % (
self.name,
self.value,
' [%s]' % self.units if self.units else '',
self.def_num,
self.type.name,
self.base_type.name,
self.raw_value, )
def __str__(self):
return '%s: %s%s' % (
self.name,
self.value,
' [%s]' % self.units if self.units else '', )
class BaseType(RecordBase):
__slots__ = ('name', 'identifier', 'fmt', 'parse')
values = None # In case we're treated as a FieldType
@property
def size(self):
return struct.calcsize(self.fmt)
@property
def type_num(self):
return self.identifier & 0x1F
def __repr__(self):
return '<BaseType: %s (#%d [0x%X])>' % (
self.name,
self.type_num,
self.identifier, )
class FieldType(RecordBase):
__slots__ = ('name', 'base_type', 'values')
def __repr__(self):
return '<FieldType: %s (%s)>' % (self.name, self.base_type)
class MessageType(RecordBase):
__slots__ = ('name', 'mesg_num', 'fields')
def __repr__(self):
return '<MessageType: %s (#%d)>' % (self.name, self.mesg_num)
class FieldAndSubFieldBase(RecordBase):
__slots__ = ()
@property
def base_type(self):
return self.type if self.is_base_type else self.type.base_type
@property
def is_base_type(self):
return isinstance(self.type, BaseType)
def render(self, raw_value):
if self.type.values and (raw_value in self.type.values):
return self.type.values[raw_value]
return raw_value
class Field(FieldAndSubFieldBase):
__slots__ = ('name', 'type', 'def_num', 'scale', 'offset', 'units',
'components', 'subfields')
field_type = 'field'
class SubField(FieldAndSubFieldBase):
__slots__ = ('name', 'def_num', 'type', 'scale', 'offset', 'units',
'components', 'ref_fields')
field_type = 'subfield'
class ReferenceField(RecordBase):
__slots__ = ('name', 'def_num', 'value', 'raw_value')
class ComponentField(RecordBase):
__slots__ = ('name', 'def_num', 'scale', 'offset', 'units', 'accumulate',
'bits', 'bit_offset')
field_type = 'component'
def render(self, raw_value):
if raw_value is None:
return None
# If it's a tuple, then it's a byte array and unpack it as such
# (only type that uses this is compressed speed/distance)
if isinstance(raw_value, tuple):
unpacked_num = 0
# Unpack byte array as little endian
for value in reversed(raw_value):
unpacked_num = (unpacked_num << 8) + value
raw_value = unpacked_num
# Mask and shift like a normal number
if isinstance(raw_value, Number):
raw_value = (raw_value >> self.bit_offset) & ((1 << self.bits) - 1)
return raw_value
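

# Hedged illustration (not part of the upstream fitparse module): how
# ComponentField.render unpacks a little-endian byte tuple and then masks and
# shifts out just the component's bits. The field values below are made up
# purely for demonstration and are not taken from the FIT profile.
def _example_component_render():  # pragma: no cover
    # A hypothetical 12-bit component starting at bit 0 of a compressed value.
    speed = ComponentField(name='speed', def_num=0, scale=100, offset=0,
                           units='m/s', accumulate=False, bits=12, bit_offset=0)
    # (0x34, 0x12) is little-endian for 0x1234; masking the low 12 bits gives 0x234.
    return speed.render((0x34, 0x12))
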
# The default base type
BASE_TYPE_BYTE = BaseType(
name='byte',
identifier=0x0D,
fmt='B',
parse=lambda x: None if all(b == 0xFF for b in x) else x)
BASE_TYPES = {
0x00: BaseType(
name='enum',
identifier=0x00,
fmt='B',
parse=lambda x: None if x == 0xFF else x),
0x01: BaseType(
name='sint8',
identifier=0x01,
fmt='b',
parse=lambda x: None if x == 0x7F else x),
0x02: BaseType(
name='uint8',
identifier=0x02,
fmt='B',
parse=lambda x: None if x == 0xFF else x),
0x83: BaseType(
name='sint16',
identifier=0x83,
fmt='h',
parse=lambda x: None if x == 0x7FFF else x),
0x84: BaseType(
name='uint16',
identifier=0x84,
fmt='H',
parse=lambda x: None if x == 0xFFFF else x),
0x85: BaseType(
name='sint32',
identifier=0x85,
fmt='i',
parse=lambda x: None if x == 0x7FFFFFFF else x),
0x86: BaseType(
name='uint32',
identifier=0x86,
fmt='I',
parse=lambda x: None if x == 0xFFFFFFFF else x),
0x07: BaseType(
name='string',
identifier=0x07,
fmt='s',
parse=lambda x: x.split(b'\x00')[0] or None),
0x88: BaseType(
name='float32',
identifier=0x88,
fmt='f',
parse=lambda x: None if math.isnan(x) else x),
0x89: BaseType(
name='float64',
identifier=0x89,
fmt='d',
parse=lambda x: None if math.isnan(x) else x),
0x0A: BaseType(
name='uint8z',
identifier=0x0A,
fmt='B',
parse=lambda x: None if x == 0x0 else x),
0x8B: BaseType(
name='uint16z',
identifier=0x8B,
fmt='H',
parse=lambda x: None if x == 0x0 else x),
0x8C: BaseType(
name='uint32z',
identifier=0x8C,
fmt='I',
parse=lambda x: None if x == 0x0 else x),
0x0D: BASE_TYPE_BYTE,
}
| bsd-3-clause | -8,365,860,220,540,263,000 | 28.291667 | 89 | 0.55192 | false |
bruceg/ezmlm-browse | main.py | 1 | 4708 | import cgitb
cgitb.enable()
import cgi
import email
import os
import sys
import time
import zipfile
import Cookie
import ezmlm
from globals import *
from globalfns import *
import context
import config
###############################################################################
# Main routine
###############################################################################
def load_form():
#if not os.environ['QUERY_STRING']:
# return { }
cgi.maxlen = 64*1024
cgiform = cgi.FieldStorage()
form = { }
for key in cgiform.keys():
item = cgiform[key]
if type(item) is not type([]) and \
not item.file:
form[key] = item.value
return form
def setup_list(ctxt):
list = ctxt[LIST]
if list:
try:
base = config.archives[list]
except KeyError:
die(ctxt, 'Unknown list: ' + list)
ctxt.update(base)
eza = ctxt[EZMLM] = ezmlm.EzmlmArchive(ctxt[LISTDIR])
if ctxt.has_key(MSGNUM):
ctxt.update(eza.index[ctxt[MSGNUM]])
if ctxt.has_key(THREADID):
ctxt.update(eza.thread(ctxt[THREADID]))
eza.set_months(ctxt)
if ctxt[TZ] and ctxt[TZ] <> 'None':
os.environ['TZ'] = ctxt[TZ]
def die_no_download(ctxt):
die(ctxt, "Downloading raw messages is administratively prohibited.")
def dump_part(ctxt, part):
if not config.allowraw \
and not ( part.get_content_maintype() == 'image'
and config.allowraw_image ):
die_no_download(ctxt)
write('Content-Type: %s; charset=%s\r\n\r\n' % (
part.get_content_type(),
part.get_content_charset('us-ascii').lower()))
write(part.get_payload(decode=1))
def main_path(ctxt, pathstr):
# FIXME: handle ?part=#.#.#&filename=string
# and then convert sub_showmsg et al to use the same notation
if not config.allowraw and not config.allowraw_image:
die_no_download(ctxt)
pathstr = pathstr.lstrip('/')
path = pathstr.split('/')
ctxt[LIST] = path[0]
try:
msgnum = int(path[1])
except:
die(ctxt, "Invalid path: " + pathstr)
setup_list(ctxt)
msg = ctxt[EZMLM].open(msgnum)
if ctxt.has_key(PART):
parts = map(int, ctxt[PART].split('.'))
part = msg
# FIXME: What the heck am I supposed to be doing with these numbers?!?
if parts[0] != 1:
raise ValueError
parts = parts[1:]
while parts:
part = part.get_payload()[parts[0]]
parts = parts[1:]
dump_part(ctxt, part)
else:
try:
partnum = int(path[2])
for part in msg.walk():
if partnum <= 0:
break
partnum -= 1
dump_part(ctxt, part)
except:
if not config.allowraw:
die_no_download(ctxt)
write('Content-Type: message/rfc822\r\n\r\n')
buf = msg.read(8192)
while buf:
write(buf)
buf = msg.read(8192)
sys.exit(0)
file_content_types = {
'css': 'text/css',
'png': 'image/png',
'gif': 'image/gif',
'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',
}
def main_file(filename):
path = os.path.join('files', filename)
try:
st = os.stat(path)
timestamp = st.st_mtime
data = open(path).read()
except OSError:
zf = zipfile.ZipFile(sys.argv[0])
data = zf.open(path).read()
timestamp = time.mktime(zf.getinfo(path).date_time + (0, 0, 0))
timestamp = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(timestamp))
ext = filename[filename.rindex('.')+1:].lower()
ct = file_content_types[ext]
sys.stdout.write('Content-Type: %s\r\n'
'Content-Length: %i\r\n'
'Last-Modified: %s\r\n'
'\r\n' % ( ct, len(data), timestamp ))
sys.stdout.write(data)
sys.stdout.flush()
sys.exit(0)
def import_command(command):
commands = __import__('commands', fromlist=[command])
try:
return getattr(commands, command)
except AttributeError:
raise ImportError, "Could not locate command: " + command
def main_form(ctxt):
setup_list(ctxt)
if ctxt.has_key('command'): ctxt[COMMAND] = ctxt['command']
if '/' in ctxt[COMMAND]:
die(ctxt, "Invalid command")
if not ctxt[LIST]:
ctxt[COMMAND] = 'lists'
try:
module = import_command(ctxt[COMMAND])
except ImportError:
die(ctxt, "Invalid command")
module.do(ctxt)
def main():
try:
path = os.environ['PATH_INFO']
except KeyError:
path = None
else:
if path.startswith('/files/'):
main_file(path[7:])
ctxt = context.ctxt = context.Context()
# Insert the environment (CGI) variables
ctxt.update(os.environ)
# Update with defaults from the config
ctxt.update(config.defaults)
# Update with all cookies
for c in Cookie.SimpleCookie(os.environ.get('HTTP_COOKIE', '')).values():
ctxt[c.key] = c.value
form = context.form = load_form()
ctxt.update(form)
# Override certain context values based on configured settings
ctxt[ALLOWRAW] = config.allowraw
ctxt[FILESPREFIX] = config.filesprefix or os.environ['SCRIPT_NAME'] + '/files/'
if path is not None:
main_path(ctxt, path)
else:
main_form(ctxt)
| gpl-2.0 | 8,600,431,670,751,673,000 | 24.448649 | 80 | 0.643161 | false |
tommyip/zulip | zerver/tests/test_events.py | 1 | 146684 | # -*- coding: utf-8 -*-
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import copy
import os
import shutil
import sys
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.utils.timezone import now as timezone_now
from io import StringIO
from zerver.models import (
get_client, get_stream_recipient, get_stream, get_realm, get_system_bot,
Message, RealmDomain, Recipient, UserMessage, UserPresence, UserProfile,
Realm, Subscription, Stream, flush_per_request_caches, UserGroup, Service,
Attachment, PreregistrationUser, get_user_by_delivery_email, MultiuseInvite
)
from zerver.lib.actions import (
try_update_realm_custom_profile_field,
bulk_add_subscriptions,
bulk_remove_subscriptions,
check_add_realm_emoji,
check_send_message,
check_send_typing_notification,
do_add_alert_words,
do_add_default_stream,
do_add_reaction,
do_add_reaction_legacy,
do_add_realm_domain,
do_add_realm_filter,
do_add_streams_to_default_stream_group,
do_add_submessage,
do_change_avatar_fields,
do_change_bot_owner,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_default_stream_group_description,
do_change_default_stream_group_name,
do_change_full_name,
do_change_icon_source,
do_change_logo_source,
do_change_is_admin,
do_change_is_guest,
do_change_notification_settings,
do_change_plan_type,
do_change_realm_domain,
do_change_stream_description,
do_change_stream_invite_only,
do_change_stream_announcement_only,
do_change_subscription_property,
do_change_user_delivery_email,
do_create_user,
do_create_default_stream_group,
do_create_multiuse_invite_link,
do_deactivate_stream,
do_deactivate_user,
do_delete_messages,
do_invite_users,
do_mark_hotspot_as_read,
do_mute_topic,
do_reactivate_user,
do_regenerate_api_key,
do_remove_alert_words,
do_remove_default_stream,
do_remove_default_stream_group,
do_remove_reaction,
do_remove_reaction_legacy,
do_remove_realm_domain,
do_remove_realm_emoji,
do_remove_realm_filter,
do_remove_streams_from_default_stream_group,
do_rename_stream,
do_revoke_multi_use_invite,
do_revoke_user_invite,
do_set_realm_authentication_methods,
do_set_realm_message_editing,
do_set_realm_property,
do_set_user_display_setting,
do_set_realm_notifications_stream,
do_set_realm_signup_notifications_stream,
do_unmute_topic,
do_update_embedded_data,
do_update_message,
do_update_message_flags,
do_update_outgoing_webhook_service,
do_update_pointer,
do_update_user_presence,
do_update_user_status,
get_typing_user_profiles,
log_event,
lookup_default_stream_groups,
notify_realm_custom_profile_fields,
check_add_user_group,
do_update_user_group_name,
do_update_user_group_description,
bulk_add_members_to_user_group,
remove_members_from_user_group,
check_delete_user_group,
do_update_user_custom_profile_data,
)
from zerver.lib.events import (
apply_events,
fetch_initial_state_data,
get_raw_user_data,
post_process_state,
)
from zerver.lib.message import (
aggregate_unread_data,
get_raw_unread_data,
render_markdown,
UnreadMessagesResult,
)
from zerver.lib.test_helpers import POSTRequestMock, get_subscription, \
get_test_image_file, stub_event_queue_user_events, queries_captured, \
create_dummy_file
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.topic import (
ORIG_TOPIC,
TOPIC_NAME,
TOPIC_LINKS,
)
from zerver.lib.topic_mutes import (
add_topic_mute,
)
from zerver.lib.validator import (
check_bool, check_dict, check_dict_only, check_float, check_int, check_list, check_string,
equals, check_none_or, Validator, check_url
)
from zerver.lib.users import get_api_key
from zerver.views.events_register import _default_all_public_streams, _default_narrow
from zerver.tornado.event_queue import (
allocate_client_descriptor,
clear_client_event_queues_for_testing,
get_client_info_for_message_event,
process_message_event,
)
from zerver.tornado.views import get_events
import mock
import time
import ujson
class LogEventsTest(ZulipTestCase):
def test_with_missing_event_log_dir_setting(self) -> None:
with self.settings(EVENT_LOG_DIR=None):
log_event(dict())
def test_log_event_mkdir(self) -> None:
dir_name = os.path.join(settings.TEST_WORKER_DIR, "test-log-dir")
try:
shutil.rmtree(dir_name)
except OSError: # nocoverage
# assume it doesn't exist already
pass
self.assertFalse(os.path.exists(dir_name))
with self.settings(EVENT_LOG_DIR=dir_name):
event = {} # type: Dict[str, int]
log_event(event)
self.assertTrue(os.path.exists(dir_name))
class EventsEndpointTest(ZulipTestCase):
def test_events_register_endpoint(self) -> None:
# This test is intended to get minimal coverage on the
# events_register code paths
email = self.example_email("hamlet")
with mock.patch('zerver.views.events_register.do_events_register', return_value={}):
result = self.api_post(email, '/json/register')
self.assert_json_success(result)
with mock.patch('zerver.lib.events.request_event_queue', return_value=None):
result = self.api_post(email, '/json/register')
self.assert_json_error(result, "Could not allocate event queue")
return_event_queue = '15:11'
return_user_events = [] # type: List[Dict[str, Any]]
# Test that call is made to deal with a returning soft deactivated user.
with mock.patch('zerver.lib.events.reactivate_user_if_soft_deactivated') as fa:
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(email, '/json/register', dict(event_types=ujson.dumps(['pointer'])))
self.assertEqual(fa.call_count, 1)
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(email, '/json/register', dict(event_types=ujson.dumps(['pointer'])))
self.assert_json_success(result)
result_dict = result.json()
self.assertEqual(result_dict['last_event_id'], -1)
self.assertEqual(result_dict['queue_id'], '15:11')
return_event_queue = '15:12'
return_user_events = [
{
'id': 6,
'type': 'pointer',
'pointer': 15,
}
]
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(email, '/json/register', dict(event_types=ujson.dumps(['pointer'])))
self.assert_json_success(result)
result_dict = result.json()
self.assertEqual(result_dict['last_event_id'], 6)
self.assertEqual(result_dict['pointer'], 15)
self.assertEqual(result_dict['queue_id'], '15:12')
# Now test with `fetch_event_types` not matching the event
return_event_queue = '15:13'
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(email, '/json/register',
dict(event_types=ujson.dumps(['pointer']),
fetch_event_types=ujson.dumps(['message'])))
self.assert_json_success(result)
result_dict = result.json()
self.assertEqual(result_dict['last_event_id'], 6)
# Check that the message event types data is in there
self.assertIn('max_message_id', result_dict)
# Check that the pointer event types data is not in there
self.assertNotIn('pointer', result_dict)
self.assertEqual(result_dict['queue_id'], '15:13')
# Now test with `fetch_event_types` matching the event
with stub_event_queue_user_events(return_event_queue, return_user_events):
result = self.api_post(email, '/json/register',
dict(fetch_event_types=ujson.dumps(['pointer']),
event_types=ujson.dumps(['message'])))
self.assert_json_success(result)
result_dict = result.json()
self.assertEqual(result_dict['last_event_id'], 6)
# Check that we didn't fetch the messages data
self.assertNotIn('max_message_id', result_dict)
# Check that the pointer data is in there, and is correctly
# updated (presering our atomicity guaranteed), though of
# course any future pointer events won't be distributed
self.assertIn('pointer', result_dict)
self.assertEqual(result_dict['pointer'], 15)
self.assertEqual(result_dict['queue_id'], '15:13')
def test_tornado_endpoint(self) -> None:
# This test is mostly intended to get minimal coverage on
# the /notify_tornado endpoint, so we can have 100% URL coverage,
# but it does exercise a little bit of the codepath.
post_data = dict(
data=ujson.dumps(
dict(
event=dict(
type='other'
),
users=[self.example_user('hamlet').id],
),
),
)
req = POSTRequestMock(post_data, user_profile=None)
req.META['REMOTE_ADDR'] = '127.0.0.1'
result = self.client_post_request('/notify_tornado', req)
self.assert_json_error(result, 'Access denied', status_code=403)
post_data['secret'] = settings.SHARED_SECRET
req = POSTRequestMock(post_data, user_profile=None)
req.META['REMOTE_ADDR'] = '127.0.0.1'
result = self.client_post_request('/notify_tornado', req)
self.assert_json_success(result)
class GetEventsTest(ZulipTestCase):
def tornado_call(self, view_func: Callable[[HttpRequest, UserProfile], HttpResponse],
user_profile: UserProfile,
post_data: Dict[str, Any]) -> HttpResponse:
request = POSTRequestMock(post_data, user_profile)
return view_func(request, user_profile)
def test_get_events(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.email
recipient_user_profile = self.example_user('othello')
recipient_email = recipient_user_profile.email
self.login(email)
result = self.tornado_call(get_events, user_profile,
{"apply_markdown": ujson.dumps(True),
"client_gravatar": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(result)
queue_id = ujson.loads(result.content)["queue_id"]
recipient_result = self.tornado_call(get_events, recipient_user_profile,
{"apply_markdown": ujson.dumps(True),
"client_gravatar": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(recipient_result)
recipient_queue_id = ujson.loads(recipient_result.content)["queue_id"]
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 0)
local_id = '10.01'
check_send_message(
sender=user_profile,
client=get_client('whatever'),
message_type_name='private',
message_to=[recipient_email],
topic_name=None,
message_content='hello',
local_id=local_id,
sender_queue_id=queue_id,
)
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["sender_email"], email)
self.assertEqual(events[0]["local_message_id"], local_id)
self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)
last_event_id = events[0]["id"]
local_id = '10.02'
check_send_message(
sender=user_profile,
client=get_client('whatever'),
message_type_name='private',
message_to=[recipient_email],
topic_name=None,
message_content='hello',
local_id=local_id,
sender_queue_id=queue_id,
)
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": last_event_id,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["sender_email"], email)
self.assertEqual(events[0]["local_message_id"], local_id)
# Test that the received message in the receiver's event queue
# exists and does not contain a local id
recipient_result = self.tornado_call(get_events, recipient_user_profile,
{"queue_id": recipient_queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
recipient_events = ujson.loads(recipient_result.content)["events"]
self.assert_json_success(recipient_result)
self.assertEqual(len(recipient_events), 2)
self.assertEqual(recipient_events[0]["type"], "message")
self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
self.assertTrue("local_message_id" not in recipient_events[0])
self.assertEqual(recipient_events[1]["type"], "message")
self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
self.assertTrue("local_message_id" not in recipient_events[1])
def test_get_events_narrow(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.email
self.login(email)
def get_message(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
result = self.tornado_call(
get_events,
user_profile,
dict(
apply_markdown=ujson.dumps(apply_markdown),
client_gravatar=ujson.dumps(client_gravatar),
event_types=ujson.dumps(["message"]),
narrow=ujson.dumps([["stream", "denmark"]]),
user_client="website",
dont_block=ujson.dumps(True),
)
)
self.assert_json_success(result)
queue_id = ujson.loads(result.content)["queue_id"]
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 0)
self.send_personal_message(email, self.example_email("othello"), "hello")
self.send_stream_message(email, "Denmark", "**hello**")
result = self.tornado_call(get_events, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1)
self.assertEqual(events[0]["type"], "message")
return events[0]['message']
message = get_message(apply_markdown=False, client_gravatar=False)
self.assertEqual(message["display_recipient"], "Denmark")
self.assertEqual(message["content"], "**hello**")
self.assertIn('gravatar.com', message["avatar_url"])
message = get_message(apply_markdown=True, client_gravatar=False)
self.assertEqual(message["display_recipient"], "Denmark")
self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
self.assertIn('gravatar.com', message["avatar_url"])
message = get_message(apply_markdown=False, client_gravatar=True)
self.assertEqual(message["display_recipient"], "Denmark")
self.assertEqual(message["content"], "**hello**")
self.assertEqual(message["avatar_url"], None)
message = get_message(apply_markdown=True, client_gravatar=True)
self.assertEqual(message["display_recipient"], "Denmark")
self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
self.assertEqual(message["avatar_url"], None)
class EventsRegisterTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user('hamlet')
def create_bot(self, email: str, **extras: Any) -> Optional[UserProfile]:
return self.create_test_bot(email, self.user_profile, **extras)
def realm_bot_schema(self, field_name: str, check: Validator) -> Validator:
return self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
(field_name, check),
])),
])
def do_test(self, action: Callable[[], object], event_types: Optional[List[str]]=None,
include_subscribers: bool=True, state_change_expected: bool=True,
notification_settings_null: bool=False,
client_gravatar: bool=False, num_events: int=1) -> List[Dict[str, Any]]:
'''
Make sure we have a clean slate of client descriptors for these tests.
If we don't do this, then certain failures will only manifest when you
run multiple tests within a single test function.
See also https://zulip.readthedocs.io/en/latest/subsystems/events-system.html#testing
for details on the design of this test system.
'''
clear_client_event_queues_for_testing()
client = allocate_client_descriptor(
dict(user_profile_id = self.user_profile.id,
user_profile_email = self.user_profile.email,
realm_id = self.user_profile.realm_id,
event_types = event_types,
client_type_name = "website",
apply_markdown = True,
client_gravatar = client_gravatar,
all_public_streams = False,
queue_timeout = 600,
last_connection_time = time.time(),
narrow = [])
)
# hybrid_state = initial fetch state + re-applying events triggered by our action
# normal_state = do action then fetch at the end (the "normal" code path)
hybrid_state = fetch_initial_state_data(
self.user_profile, event_types, "",
client_gravatar=True,
include_subscribers=include_subscribers
)
action()
events = client.event_queue.contents()
self.assertEqual(len(events), num_events)
initial_state = copy.deepcopy(hybrid_state)
post_process_state(self.user_profile, initial_state, notification_settings_null)
before = ujson.dumps(initial_state)
apply_events(hybrid_state, events, self.user_profile,
client_gravatar=True, include_subscribers=include_subscribers)
post_process_state(self.user_profile, hybrid_state, notification_settings_null)
after = ujson.dumps(hybrid_state)
if state_change_expected:
if before == after: # nocoverage
print(ujson.dumps(initial_state, indent=2))
print(events)
raise AssertionError('Test does not exercise enough code -- events do not change state.')
else:
try:
self.match_states(initial_state, copy.deepcopy(hybrid_state), events)
except AssertionError: # nocoverage
raise AssertionError('Test is invalid--state actually does change here.')
normal_state = fetch_initial_state_data(
self.user_profile, event_types, "",
client_gravatar=True,
include_subscribers=include_subscribers,
)
post_process_state(self.user_profile, normal_state, notification_settings_null)
self.match_states(hybrid_state, normal_state, events)
return events
def assert_on_error(self, error: Optional[str]) -> None:
if error:
raise AssertionError(error)
def match_states(self, state1: Dict[str, Any], state2: Dict[str, Any],
events: List[Dict[str, Any]]) -> None:
def normalize(state: Dict[str, Any]) -> None:
for u in state['never_subscribed']:
if 'subscribers' in u:
u['subscribers'].sort()
for u in state['subscriptions']:
if 'subscribers' in u:
u['subscribers'].sort()
state['subscriptions'] = {u['name']: u for u in state['subscriptions']}
state['unsubscribed'] = {u['name']: u for u in state['unsubscribed']}
if 'realm_bots' in state:
state['realm_bots'] = {u['email']: u for u in state['realm_bots']}
normalize(state1)
normalize(state2)
        # If this assertion fails, we have unusual problems.
self.assertEqual(state1.keys(), state2.keys())
# The far more likely scenario is that some section of
# our enormous payload does not get updated properly. We
# want the diff here to be developer-friendly, hence
# the somewhat tedious code to provide useful output.
if state1 != state2: # nocoverage
print('\n---States DO NOT MATCH---')
print('\nEVENTS:\n')
# Printing out the events is a big help to
# developers.
import json
for event in events:
print(json.dumps(event, indent=4))
print('\nMISMATCHES:\n')
for k in state1:
if state1[k] != state2[k]:
print('\nkey = ' + k)
try:
self.assertEqual({k: state1[k]}, {k: state2[k]})
except AssertionError as e:
print(e)
print('''
NOTE:
This is an advanced test that verifies how
we apply events after fetching data. If you
do not know how to debug it, you can ask for
help on chat.
''')
sys.stdout.flush()
raise AssertionError('Mismatching states')
def check_events_dict(self, required_keys: List[Tuple[str, Validator]]) -> Validator:
required_keys.append(('id', check_int))
# Raise AssertionError if `required_keys` contains duplicate items.
keys = [key[0] for key in required_keys]
self.assertEqual(len(keys), len(set(keys)), 'Duplicate items found in required_keys.')
return check_dict_only(required_keys)
def test_mentioned_send_message_events(self) -> None:
user = self.example_user('hamlet')
for i in range(3):
content = 'mentioning... @**' + user.full_name + '** hello ' + str(i)
self.do_test(
lambda: self.send_stream_message(self.example_email('cordelia'),
"Verona",
content)
)
def test_pm_send_message_events(self) -> None:
self.do_test(
lambda: self.send_personal_message(self.example_email('cordelia'),
self.example_email('hamlet'),
'hola')
)
def test_huddle_send_message_events(self) -> None:
huddle = [
self.example_email('hamlet'),
self.example_email('othello'),
]
self.do_test(
lambda: self.send_huddle_message(self.example_email('cordelia'),
huddle,
'hola')
)
def test_stream_send_message_events(self) -> None:
def check_none(var_name: str, val: object) -> Optional[str]:
assert(val is None)
return None
def get_checker(check_gravatar: Validator) -> Validator:
schema_checker = self.check_events_dict([
('type', equals('message')),
('flags', check_list(None)),
('message', self.check_events_dict([
('avatar_url', check_gravatar),
('client', check_string),
('content', check_string),
('content_type', equals('text/html')),
('display_recipient', check_string),
('is_me_message', check_bool),
('reactions', check_list(None)),
('recipient_id', check_int),
('sender_realm_str', check_string),
('sender_email', check_string),
('sender_full_name', check_string),
('sender_id', check_int),
('sender_short_name', check_string),
('stream_id', check_int),
(TOPIC_NAME, check_string),
(TOPIC_LINKS, check_list(None)),
('submessages', check_list(None)),
('timestamp', check_int),
('type', check_string),
])),
])
return schema_checker
events = self.do_test(
lambda: self.send_stream_message(self.example_email("hamlet"), "Verona", "hello"),
client_gravatar=False,
)
schema_checker = get_checker(check_gravatar=check_string)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(
lambda: self.send_stream_message(self.example_email("hamlet"), "Verona", "hello"),
client_gravatar=True,
)
schema_checker = get_checker(check_gravatar=check_none)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Verify message editing
schema_checker = self.check_events_dict([
('type', equals('update_message')),
('flags', check_list(None)),
('content', check_string),
('edit_timestamp', check_int),
('message_id', check_int),
('message_ids', check_list(check_int)),
('prior_mention_user_ids', check_list(check_int)),
('mention_user_ids', check_list(check_int)),
('presence_idle_user_ids', check_list(check_int)),
('stream_push_user_ids', check_list(check_int)),
('stream_email_user_ids', check_list(check_int)),
('push_notify_user_ids', check_list(check_int)),
('orig_content', check_string),
('orig_rendered_content', check_string),
(ORIG_TOPIC, check_string),
('prev_rendered_content_version', check_int),
('propagate_mode', check_string),
('rendered_content', check_string),
('sender', check_string),
('stream_id', check_int),
('stream_name', check_string),
(TOPIC_NAME, check_string),
(TOPIC_LINKS, check_list(None)),
('user_id', check_int),
('is_me_message', check_bool),
])
message = Message.objects.order_by('-id')[0]
topic = 'new_topic'
propagate_mode = 'change_all'
content = 'new content'
rendered_content = render_markdown(message, content)
prior_mention_user_ids = set() # type: Set[int]
mentioned_user_ids = set() # type: Set[int]
events = self.do_test(
lambda: do_update_message(self.user_profile, message, topic,
propagate_mode, content, rendered_content,
prior_mention_user_ids,
mentioned_user_ids),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Verify do_update_embedded_data
schema_checker = self.check_events_dict([
('type', equals('update_message')),
('flags', check_list(None)),
('content', check_string),
('message_id', check_int),
('message_ids', check_list(check_int)),
('rendered_content', check_string),
('sender', check_string),
])
events = self.do_test(
lambda: do_update_embedded_data(self.user_profile, message,
u"embed_content", "<p>embed_content</p>"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_update_message_flags(self) -> None:
# Test message flag update events
schema_checker = self.check_events_dict([
('all', check_bool),
('type', equals('update_message_flags')),
('flag', check_string),
('messages', check_list(check_int)),
('operation', equals("add")),
])
message = self.send_personal_message(
self.example_email("cordelia"),
self.example_email("hamlet"),
"hello",
)
user_profile = self.example_user('hamlet')
events = self.do_test(
lambda: do_update_message_flags(user_profile, get_client("website"), 'add', 'starred', [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = self.check_events_dict([
('all', check_bool),
('type', equals('update_message_flags')),
('flag', check_string),
('messages', check_list(check_int)),
('operation', equals("remove")),
])
events = self.do_test(
lambda: do_update_message_flags(user_profile, get_client("website"), 'remove', 'starred', [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_update_read_flag_removes_unread_msg_ids(self) -> None:
user_profile = self.example_user('hamlet')
mention = '@**' + user_profile.full_name + '**'
for content in ['hello', mention]:
message = self.send_stream_message(
self.example_email('cordelia'),
"Verona",
content
)
self.do_test(
lambda: do_update_message_flags(user_profile, get_client("website"), 'add', 'read', [message]),
state_change_expected=True,
)
def test_send_message_to_existing_recipient(self) -> None:
self.send_stream_message(
self.example_email('cordelia'),
"Verona",
"hello 1"
)
self.do_test(
lambda: self.send_stream_message("[email protected]", "Verona", "hello 2"),
state_change_expected=True,
)
def test_add_reaction_legacy(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('reaction')),
('op', equals('add')),
('message_id', check_int),
('emoji_name', check_string),
('emoji_code', check_string),
('reaction_type', check_string),
('user', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int)
])),
])
message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
message = Message.objects.get(id=message_id)
events = self.do_test(
lambda: do_add_reaction_legacy(
self.user_profile, message, "tada"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_remove_reaction_legacy(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('reaction')),
('op', equals('remove')),
('message_id', check_int),
('emoji_name', check_string),
('emoji_code', check_string),
('reaction_type', check_string),
('user', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int)
])),
])
message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
message = Message.objects.get(id=message_id)
do_add_reaction_legacy(self.user_profile, message, "tada")
events = self.do_test(
lambda: do_remove_reaction_legacy(
self.user_profile, message, "tada"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_add_reaction(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('reaction')),
('op', equals('add')),
('message_id', check_int),
('emoji_name', check_string),
('emoji_code', check_string),
('reaction_type', check_string),
('user', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int)
])),
])
message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
message = Message.objects.get(id=message_id)
events = self.do_test(
lambda: do_add_reaction(
self.user_profile, message, "tada", "1f389", "unicode_emoji"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_add_submessage(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('submessage')),
('message_id', check_int),
('submessage_id', check_int),
('sender_id', check_int),
('msg_type', check_string),
('content', check_string),
])
cordelia = self.example_user('cordelia')
stream_name = 'Verona'
message_id = self.send_stream_message(
sender_email=cordelia.email,
stream_name=stream_name,
)
events = self.do_test(
lambda: do_add_submessage(
realm=cordelia.realm,
sender_id=cordelia.id,
message_id=message_id,
msg_type='whatever',
content='"stuff"',
),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_remove_reaction(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('reaction')),
('op', equals('remove')),
('message_id', check_int),
('emoji_name', check_string),
('emoji_code', check_string),
('reaction_type', check_string),
('user', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int)
])),
])
message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
message = Message.objects.get(id=message_id)
do_add_reaction(self.user_profile, message, "tada", "1f389", "unicode_emoji")
events = self.do_test(
lambda: do_remove_reaction(
self.user_profile, message, "1f389", "unicode_emoji"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_invite_user_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Scotland"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
events = self.do_test(
lambda: do_invite_users(self.user_profile, ["[email protected]"], streams, False),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_create_multiuse_invite_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Verona"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
events = self.do_test(
lambda: do_create_multiuse_invite_link(self.user_profile, PreregistrationUser.INVITE_AS['MEMBER'], streams),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_revoke_user_invite_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Verona"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
do_invite_users(self.user_profile, ["[email protected]"], streams, False)
prereg_users = PreregistrationUser.objects.filter(referred_by__realm=self.user_profile.realm)
events = self.do_test(
lambda: do_revoke_user_invite(prereg_users[0]),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_revoke_multiuse_invite_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Verona"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
do_create_multiuse_invite_link(self.user_profile, PreregistrationUser.INVITE_AS['MEMBER'], streams)
multiuse_object = MultiuseInvite.objects.get()
events = self.do_test(
lambda: do_revoke_multi_use_invite(multiuse_object),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_invitation_accept_invite_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Scotland"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
do_invite_users(self.user_profile, ["[email protected]"], streams, False)
prereg_users = PreregistrationUser.objects.get(email="[email protected]")
events = self.do_test(
lambda: do_create_user('[email protected]', 'password', self.user_profile.realm,
'full name', 'short name', prereg_user=prereg_users),
state_change_expected=True,
num_events=5,
)
error = schema_checker('events[4]', events[4])
self.assert_on_error(error)
def test_typing_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('typing')),
('op', equals('start')),
('sender', check_dict_only([
('email', check_string),
('user_id', check_int)])),
('recipients', check_list(check_dict_only([
('email', check_string),
('user_id', check_int),
]))),
])
events = self.do_test(
lambda: check_send_typing_notification(
self.user_profile, [self.example_email("cordelia")], "start"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_get_typing_user_profiles(self) -> None:
"""
Make sure we properly assert failures for recipient types that should not
get typing... notifications.
"""
sender_profile = self.example_user('cordelia')
stream = get_stream('Rome', sender_profile.realm)
# Test stream
with self.assertRaisesRegex(ValueError, 'not supported for streams'):
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
get_typing_user_profiles(recipient, sender_profile.id)
# Test some other recipient type
with self.assertRaisesRegex(ValueError, 'Bad recipient type'):
recipient = Recipient(type=999) # invalid type
get_typing_user_profiles(recipient, sender_profile.id)
def test_custom_profile_fields_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('custom_profile_fields')),
('op', equals('add')),
('fields', check_list(check_dict_only([
('id', check_int),
('type', check_int),
('name', check_string),
('hint', check_string),
('field_data', check_string),
('order', check_int),
]))),
])
events = self.do_test(
lambda: notify_realm_custom_profile_fields(
self.user_profile.realm, 'add'),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
realm = self.user_profile.realm
field = realm.customprofilefield_set.get(realm=realm, name='Biography')
name = field.name
hint = 'Biography of the user'
try_update_realm_custom_profile_field(realm, field, name, hint=hint)
events = self.do_test(
lambda: notify_realm_custom_profile_fields(
self.user_profile.realm, 'add'),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_custom_profile_field_data_events(self) -> None:
schema_checker_basic = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('user_id', check_int),
('custom_profile_field', check_dict([
('id', check_int),
('value', check_none_or(check_string)),
])),
])),
])
schema_checker_with_rendered_value = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('user_id', check_int),
('custom_profile_field', check_dict([
('id', check_int),
('value', check_none_or(check_string)),
('rendered_value', check_none_or(check_string)),
])),
])),
])
field_id = self.user_profile.realm.customprofilefield_set.get(
realm=self.user_profile.realm, name='Biography').id
field = {
"id": field_id,
"value": "New value",
}
events = self.do_test(lambda: do_update_user_custom_profile_data(self.user_profile, [field]))
error = schema_checker_with_rendered_value('events[0]', events[0])
self.assert_on_error(error)
        # Test that we pass the correctly stringified value in the custom-user-field data event
field_id = self.user_profile.realm.customprofilefield_set.get(
realm=self.user_profile.realm, name='Mentor').id
field = {
"id": field_id,
"value": [self.example_user("ZOE").id],
}
events = self.do_test(lambda: do_update_user_custom_profile_data(self.user_profile, [field]))
error = schema_checker_basic('events[0]', events[0])
self.assert_on_error(error)
def test_presence_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('presence')),
('email', check_string),
('server_timestamp', check_float),
('presence', check_dict_only([
('website', check_dict_only([
('status', equals('active')),
('timestamp', check_int),
('client', check_string),
('pushable', check_bool),
])),
])),
])
events = self.do_test(lambda: do_update_user_presence(
self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_presence_events_multiple_clients(self) -> None:
schema_checker_android = self.check_events_dict([
('type', equals('presence')),
('email', check_string),
('server_timestamp', check_float),
('presence', check_dict_only([
('ZulipAndroid/1.0', check_dict_only([
('status', equals('idle')),
('timestamp', check_int),
('client', check_string),
('pushable', check_bool),
])),
])),
])
self.api_post(self.user_profile.email, "/api/v1/users/me/presence", {'status': 'idle'},
HTTP_USER_AGENT="ZulipAndroid/1.0")
self.do_test(lambda: do_update_user_presence(
self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE))
events = self.do_test(lambda: do_update_user_presence(
self.user_profile, get_client("ZulipAndroid/1.0"), timezone_now(), UserPresence.IDLE))
error = schema_checker_android('events[0]', events[0])
self.assert_on_error(error)
def test_pointer_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('pointer')),
('pointer', check_int)
])
events = self.do_test(lambda: do_update_pointer(self.user_profile, get_client("website"), 1500))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_register_events(self) -> None:
realm_user_add_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('add')),
('person', check_dict_only([
('user_id', check_int),
('email', check_string),
('avatar_url', check_none_or(check_string)),
('full_name', check_string),
('is_admin', check_bool),
('is_bot', check_bool),
('is_guest', check_bool),
('profile_data', check_dict_only([])),
('timezone', check_string),
('date_joined', check_string),
])),
])
events = self.do_test(lambda: self.register("[email protected]", "test1"))
self.assert_length(events, 1)
error = realm_user_add_checker('events[0]', events[0])
self.assert_on_error(error)
new_user_profile = get_user_by_delivery_email("[email protected]", self.user_profile.realm)
self.assertEqual(new_user_profile.email, "[email protected]")
def test_register_events_email_address_visibility(self) -> None:
realm_user_add_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('add')),
('person', check_dict_only([
('user_id', check_int),
('email', check_string),
('avatar_url', check_none_or(check_string)),
('full_name', check_string),
('is_admin', check_bool),
('is_bot', check_bool),
('is_guest', check_bool),
('profile_data', check_dict_only([])),
('timezone', check_string),
('date_joined', check_string),
])),
])
do_set_realm_property(self.user_profile.realm, "email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
events = self.do_test(lambda: self.register("[email protected]", "test1"))
self.assert_length(events, 1)
error = realm_user_add_checker('events[0]', events[0])
self.assert_on_error(error)
new_user_profile = get_user_by_delivery_email("[email protected]", self.user_profile.realm)
self.assertEqual(new_user_profile.email, "user%[email protected]" % (new_user_profile.id,))
def test_alert_words_events(self) -> None:
alert_words_checker = self.check_events_dict([
('type', equals('alert_words')),
('alert_words', check_list(check_string)),
])
events = self.do_test(lambda: do_add_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
def test_away_events(self) -> None:
checker = self.check_events_dict([
('type', equals('user_status')),
('user_id', check_int),
('away', check_bool),
('status_text', check_string),
])
client = get_client("website")
events = self.do_test(lambda: do_update_user_status(user_profile=self.user_profile,
away=True,
status_text='out to lunch',
client_id=client.id))
error = checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_update_user_status(user_profile=self.user_profile,
away=False,
status_text='',
client_id=client.id))
error = checker('events[0]', events[0])
self.assert_on_error(error)
def test_user_group_events(self) -> None:
user_group_add_checker = self.check_events_dict([
('type', equals('user_group')),
('op', equals('add')),
('group', check_dict_only([
('id', check_int),
('name', check_string),
('members', check_list(check_int)),
('description', check_string),
])),
])
othello = self.example_user('othello')
events = self.do_test(lambda: check_add_user_group(self.user_profile.realm,
'backend', [othello],
'Backend team'))
error = user_group_add_checker('events[0]', events[0])
self.assert_on_error(error)
# Test name update
user_group_update_checker = self.check_events_dict([
('type', equals('user_group')),
('op', equals('update')),
('group_id', check_int),
('data', check_dict_only([
('name', check_string),
])),
])
backend = UserGroup.objects.get(name='backend')
events = self.do_test(lambda: do_update_user_group_name(backend, 'backendteam'))
error = user_group_update_checker('events[0]', events[0])
self.assert_on_error(error)
# Test description update
user_group_update_checker = self.check_events_dict([
('type', equals('user_group')),
('op', equals('update')),
('group_id', check_int),
('data', check_dict_only([
('description', check_string),
])),
])
description = "Backend team to deal with backend code."
events = self.do_test(lambda: do_update_user_group_description(backend, description))
error = user_group_update_checker('events[0]', events[0])
self.assert_on_error(error)
# Test add members
user_group_add_member_checker = self.check_events_dict([
('type', equals('user_group')),
('op', equals('add_members')),
('group_id', check_int),
('user_ids', check_list(check_int)),
])
hamlet = self.example_user('hamlet')
events = self.do_test(lambda: bulk_add_members_to_user_group(backend, [hamlet]))
error = user_group_add_member_checker('events[0]', events[0])
self.assert_on_error(error)
# Test remove members
user_group_remove_member_checker = self.check_events_dict([
('type', equals('user_group')),
('op', equals('remove_members')),
('group_id', check_int),
('user_ids', check_list(check_int)),
])
hamlet = self.example_user('hamlet')
events = self.do_test(lambda: remove_members_from_user_group(backend, [hamlet]))
error = user_group_remove_member_checker('events[0]', events[0])
self.assert_on_error(error)
# Test delete event
user_group_remove_checker = self.check_events_dict([
('type', equals('user_group')),
('op', equals('remove')),
('group_id', check_int),
])
events = self.do_test(lambda: check_delete_user_group(backend.id, othello))
error = user_group_remove_checker('events[0]', events[0])
self.assert_on_error(error)
def test_default_stream_groups_events(self) -> None:
default_stream_groups_checker = self.check_events_dict([
('type', equals('default_stream_groups')),
('default_stream_groups', check_list(check_dict_only([
('name', check_string),
('id', check_int),
('description', check_string),
('streams', check_list(check_dict_only([
('description', check_string),
('rendered_description', check_string),
('invite_only', check_bool),
('is_web_public', check_bool),
('is_announcement_only', check_bool),
('name', check_string),
('stream_id', check_int),
('first_message_id', check_none_or(check_int)),
('history_public_to_subscribers', check_bool)]))),
]))),
])
streams = []
for stream_name in ["Scotland", "Verona", "Denmark"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
events = self.do_test(lambda: do_create_default_stream_group(
self.user_profile.realm, "group1", "This is group1", streams))
error = default_stream_groups_checker('events[0]', events[0])
self.assert_on_error(error)
group = lookup_default_stream_groups(["group1"], self.user_profile.realm)[0]
venice_stream = get_stream("Venice", self.user_profile.realm)
events = self.do_test(lambda: do_add_streams_to_default_stream_group(self.user_profile.realm,
group, [venice_stream]))
error = default_stream_groups_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_streams_from_default_stream_group(self.user_profile.realm,
group, [venice_stream]))
error = default_stream_groups_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_change_default_stream_group_description(self.user_profile.realm,
group, "New description"))
error = default_stream_groups_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_change_default_stream_group_name(self.user_profile.realm,
group, "New Group Name"))
error = default_stream_groups_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_default_stream_group(self.user_profile.realm, group))
error = default_stream_groups_checker('events[0]', events[0])
self.assert_on_error(error)
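    # Guest users should not receive default stream group events, so adding a
    # stream to the group below is expected to generate zero events for them.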
def test_default_stream_group_events_guest(self) -> None:
streams = []
for stream_name in ["Scotland", "Verona", "Denmark"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
do_create_default_stream_group(self.user_profile.realm, "group1",
"This is group1", streams)
group = lookup_default_stream_groups(["group1"], self.user_profile.realm)[0]
do_change_is_guest(self.user_profile, True)
venice_stream = get_stream("Venice", self.user_profile.realm)
self.do_test(lambda: do_add_streams_to_default_stream_group(self.user_profile.realm,
group, [venice_stream]),
state_change_expected = False, num_events=0)
def test_default_streams_events(self) -> None:
default_streams_checker = self.check_events_dict([
('type', equals('default_streams')),
('default_streams', check_list(check_dict_only([
('description', check_string),
('invite_only', check_bool),
('name', check_string),
('stream_id', check_int),
]))),
])
stream = get_stream("Scotland", self.user_profile.realm)
events = self.do_test(lambda: do_add_default_stream(stream))
        error = default_streams_checker('events[0]', events[0])
        self.assert_on_error(error)
events = self.do_test(lambda: do_remove_default_stream(stream))
error = default_streams_checker('events[0]', events[0])
self.assert_on_error(error)
def test_default_streams_events_guest(self) -> None:
do_change_is_guest(self.user_profile, True)
stream = get_stream("Scotland", self.user_profile.realm)
self.do_test(lambda: do_add_default_stream(stream),
state_change_expected = False, num_events=0)
self.do_test(lambda: do_remove_default_stream(stream),
state_change_expected = False, num_events=0)
def test_muted_topics_events(self) -> None:
muted_topics_checker = self.check_events_dict([
('type', equals('muted_topics')),
('muted_topics', check_list(check_list(check_string, 2))),
])
stream = get_stream('Denmark', self.user_profile.realm)
recipient = get_stream_recipient(stream.id)
events = self.do_test(lambda: do_mute_topic(
self.user_profile, stream, recipient, "topic"))
error = muted_topics_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_unmute_topic(
self.user_profile, stream, "topic"))
error = muted_topics_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_avatar_fields(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('user_id', check_int),
('avatar_url', check_string),
('avatar_url_medium', check_string),
('avatar_source', check_string),
])),
])
events = self.do_test(
lambda: do_change_avatar_fields(self.user_profile, UserProfile.AVATAR_FROM_USER),
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('user_id', check_int),
('avatar_url', check_none_or(check_string)),
('avatar_url_medium', check_none_or(check_string)),
('avatar_source', check_string),
])),
])
events = self.do_test(
lambda: do_change_avatar_fields(self.user_profile, UserProfile.AVATAR_FROM_GRAVATAR),
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_full_name(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int),
])),
])
events = self.do_test(lambda: do_change_full_name(self.user_profile, 'Sir Hamlet', self.user_profile))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_change_user_delivery_email_email_address_visibility_admins(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('delivery_email', check_string),
('user_id', check_int),
])),
])
do_set_realm_property(self.user_profile.realm, "email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
# Important: We need to refresh from the database here so that
# we don't have a stale UserProfile object with an old value
# for email being passed into this next function.
self.user_profile.refresh_from_db()
action = lambda: do_change_user_delivery_email(self.user_profile, '[email protected]')
events = self.do_test(action, num_events=1)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
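    # Helper for test_change_realm_property below: cycles one realm property
    # through a list of test values and validates the realm/update event
    # emitted for each change.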
def do_set_realm_property_test(self, name: str) -> None:
bool_tests = [True, False, True] # type: List[bool]
test_values = dict(
default_language=[u'es', u'de', u'en'],
description=[u'Realm description', u'New description'],
digest_weekday=[0, 1, 2],
message_retention_days=[10, 20],
name=[u'Zulip', u'New Name'],
waiting_period_threshold=[10, 20],
create_stream_policy=[3, 2, 1],
invite_to_stream_policy=[3, 2, 1],
email_address_visibility=[Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS],
bot_creation_policy=[Realm.BOT_CREATION_EVERYONE],
video_chat_provider=[
Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'],
Realm.VIDEO_CHAT_PROVIDERS['google_hangouts']['id']
],
google_hangouts_domain=[u"zulip.com", u"zulip.org"],
zoom_api_secret=[u"abc", u"xyz"],
zoom_api_key=[u"abc", u"xyz"],
zoom_user_id=[u"[email protected]", u"[email protected]"]
) # type: Dict[str, Any]
vals = test_values.get(name)
property_type = Realm.property_types[name]
if property_type is bool:
validator = check_bool
vals = bool_tests
elif property_type is str:
validator = check_string
elif property_type is int:
validator = check_int
elif property_type == (int, type(None)):
validator = check_int
else:
raise AssertionError("Unexpected property type %s" % (property_type,))
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals(name)),
('value', validator),
])
if vals is None:
raise AssertionError('No test created for %s' % (name,))
do_set_realm_property(self.user_profile.realm, name, vals[0])
for val in vals[1:]:
state_change_expected = True
if name == "zoom_api_secret":
state_change_expected = False
events = self.do_test(
lambda: do_set_realm_property(self.user_profile.realm, name, val),
state_change_expected=state_change_expected)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
@slow("Actually runs several full-stack fetching tests")
def test_change_realm_property(self) -> None:
for prop in Realm.property_types:
with self.settings(SEND_DIGEST_EMAILS=True):
self.do_set_realm_property_test(prop)
@slow("Runs a large matrix of tests")
def test_change_realm_authentication_methods(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update_dict')),
('property', equals('default')),
('data', check_dict_only([
('authentication_methods', check_dict([]))
])),
])
def fake_backends() -> Any:
backends = (
'zproject.backends.DevAuthBackend',
'zproject.backends.EmailAuthBackend',
'zproject.backends.GitHubAuthBackend',
'zproject.backends.GoogleAuthBackend',
'zproject.backends.ZulipLDAPAuthBackend',
)
return self.settings(AUTHENTICATION_BACKENDS=backends)
# Test transitions; any new backends should be tested with T/T/T/F/T
for (auth_method_dict) in \
({'Google': True, 'Email': True, 'GitHub': True, 'LDAP': False, 'Dev': False},
{'Google': True, 'Email': True, 'GitHub': False, 'LDAP': False, 'Dev': False},
{'Google': True, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': False},
{'Google': True, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': False},
{'Google': False, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': True},
{'Google': False, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': True},
{'Google': False, 'Email': True, 'GitHub': True, 'LDAP': True, 'Dev': False}):
with fake_backends():
events = self.do_test(
lambda: do_set_realm_authentication_methods(
self.user_profile.realm,
auth_method_dict))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_pin_stream(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('update')),
('property', equals('pin_to_top')),
('stream_id', check_int),
('value', check_bool),
('name', check_string),
('email', check_string),
])
stream = get_stream("Denmark", self.user_profile.realm)
sub = get_subscription(stream.name, self.user_profile)
do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", False)
for pinned in (True, False):
events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", pinned))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_stream_notification_settings(self) -> None:
for setting_name in ['email_notifications']:
schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('update')),
('property', equals(setting_name)),
('stream_id', check_int),
('value', check_bool),
('name', check_string),
('email', check_string),
])
stream = get_stream("Denmark", self.user_profile.realm)
sub = get_subscription(stream.name, self.user_profile)
# First test with notification_settings_null enabled
for value in (True, False):
events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream,
setting_name, value),
notification_settings_null=True)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
for value in (True, False):
events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream,
setting_name, value))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
@slow("Runs a matrix of 6 queries to the /home view")
def test_change_realm_message_edit_settings(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update_dict')),
('property', equals('default')),
('data', check_dict_only([
('allow_message_editing', check_bool),
('message_content_edit_limit_seconds', check_int),
('allow_community_topic_editing', check_bool),
])),
])
# Test every transition among the four possibilities {T,F} x {0, non-0}
for (allow_message_editing, message_content_edit_limit_seconds) in \
((True, 0), (False, 0), (False, 1234),
(True, 600), (False, 0), (True, 1234)):
events = self.do_test(
lambda: do_set_realm_message_editing(self.user_profile.realm,
allow_message_editing,
message_content_edit_limit_seconds,
False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_notifications_stream(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('notifications_stream_id')),
('value', check_int),
])
stream = get_stream("Rome", self.user_profile.realm)
for notifications_stream, notifications_stream_id in ((stream, stream.id), (None, -1)):
events = self.do_test(
lambda: do_set_realm_notifications_stream(self.user_profile.realm,
notifications_stream,
notifications_stream_id))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_signup_notifications_stream(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('signup_notifications_stream_id')),
('value', check_int),
])
stream = get_stream("Rome", self.user_profile.realm)
for signup_notifications_stream, signup_notifications_stream_id in ((stream, stream.id), (None, -1)):
events = self.do_test(
lambda: do_set_realm_signup_notifications_stream(self.user_profile.realm,
signup_notifications_stream,
signup_notifications_stream_id))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_is_admin(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('is_admin', check_bool),
('user_id', check_int),
])),
])
do_change_is_admin(self.user_profile, False)
for is_admin in [True, False]:
events = self.do_test(lambda: do_change_is_admin(self.user_profile, is_admin))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def do_set_user_display_settings_test(self, setting_name: str) -> None:
"""Test updating each setting in UserProfile.property_types dict."""
test_changes = dict(
emojiset = [u'apple', u'twitter'],
default_language = [u'es', u'de', u'en'],
timezone = [u'US/Mountain', u'US/Samoa', u'Pacific/Galapogos', u''],
demote_inactive_streams = [2, 3, 1],
) # type: Dict[str, Any]
property_type = UserProfile.property_types[setting_name]
if property_type is bool:
validator = check_bool
elif property_type is str:
validator = check_string
elif property_type is int:
validator = check_int
else:
raise AssertionError("Unexpected property type %s" % (property_type,))
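        # A timezone change also produces a realm_user update event carrying
        # the new timezone, so two events are expected for that setting.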
num_events = 1
if setting_name == "timezone":
num_events = 2
values = test_changes.get(setting_name)
if property_type is bool:
if getattr(self.user_profile, setting_name) is False:
values = [True, False, True]
else:
values = [False, True, False]
if values is None:
raise AssertionError('No test created for %s' % (setting_name,))
for value in values:
events = self.do_test(lambda: do_set_user_display_setting(
self.user_profile, setting_name, value), num_events=num_events)
schema_checker = self.check_events_dict([
('type', equals('update_display_settings')),
('setting_name', equals(setting_name)),
('user', check_string),
('setting', validator),
])
language_schema_checker = self.check_events_dict([
('type', equals('update_display_settings')),
('language_name', check_string),
('setting_name', equals(setting_name)),
('user', check_string),
('setting', validator),
])
if setting_name == "default_language":
error = language_schema_checker('events[0]', events[0])
else:
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
timezone_schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('user_id', check_int),
('timezone', check_string),
])),
])
if setting_name == "timezone":
error = timezone_schema_checker('events[1]', events[1])
@slow("Actually runs several full-stack fetching tests")
def test_set_user_display_settings(self) -> None:
for prop in UserProfile.property_types:
self.do_set_user_display_settings_test(prop)
@slow("Actually runs several full-stack fetching tests")
def test_change_notification_settings(self) -> None:
for notification_setting, v in self.user_profile.notification_setting_types.items():
if notification_setting in ["notification_sound", "desktop_icon_count_display"]:
# These settings are tested in their own tests.
continue
schema_checker = self.check_events_dict([
('type', equals('update_global_notifications')),
('notification_name', equals(notification_setting)),
('user', check_string),
('setting', check_bool),
])
do_change_notification_settings(self.user_profile, notification_setting, False)
for setting_value in [True, False]:
events = self.do_test(lambda: do_change_notification_settings(
self.user_profile, notification_setting, setting_value, log=False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Also test with notification_settings_null=True
events = self.do_test(
lambda: do_change_notification_settings(
self.user_profile, notification_setting, setting_value, log=False),
notification_settings_null=True,
state_change_expected=False)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_notification_sound(self) -> None:
notification_setting = "notification_sound"
schema_checker = self.check_events_dict([
('type', equals('update_global_notifications')),
('notification_name', equals(notification_setting)),
('user', check_string),
('setting', equals("ding")),
])
events = self.do_test(lambda: do_change_notification_settings(
self.user_profile, notification_setting, 'ding', log=False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_desktop_icon_count_display(self) -> None:
notification_setting = "desktop_icon_count_display"
schema_checker = self.check_events_dict([
('type', equals('update_global_notifications')),
('notification_name', equals(notification_setting)),
('user', check_string),
('setting', equals(2)),
])
events = self.do_test(lambda: do_change_notification_settings(
self.user_profile, notification_setting, 2, log=False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = self.check_events_dict([
('type', equals('update_global_notifications')),
('notification_name', equals(notification_setting)),
('user', check_string),
('setting', equals(1)),
])
events = self.do_test(lambda: do_change_notification_settings(
self.user_profile, notification_setting, 1, log=False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
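    # Downgrading to the LIMITED plan should emit a realm/update event for
    # 'plan_type' (with the new upload quota in extra_data) and turn off
    # plan_includes_wide_organization_logo in the initial state data.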
def test_realm_update_plan_type(self) -> None:
realm = self.user_profile.realm
state_data = fetch_initial_state_data(self.user_profile, None, "", False)
self.assertEqual(state_data['realm_plan_type'], Realm.SELF_HOSTED)
self.assertEqual(state_data['plan_includes_wide_organization_logo'], True)
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('plan_type')),
('value', equals(Realm.LIMITED)),
('extra_data', check_dict_only([
('upload_quota', check_int)
])),
])
events = self.do_test(lambda: do_change_plan_type(realm, Realm.LIMITED))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
state_data = fetch_initial_state_data(self.user_profile, None, "", False)
self.assertEqual(state_data['realm_plan_type'], Realm.LIMITED)
self.assertEqual(state_data['plan_includes_wide_organization_logo'], False)
def test_realm_emoji_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_emoji')),
('op', equals('update')),
('realm_emoji', check_dict([])),
])
author = self.example_user('iago')
with get_test_image_file('img.png') as img_file:
events = self.do_test(lambda: check_add_realm_emoji(self.user_profile.realm,
"my_emoji",
author,
img_file))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_realm_emoji(self.user_profile.realm, "my_emoji"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_realm_filter_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_filters')),
('realm_filters', check_list(None)), # TODO: validate tuples in the list
])
events = self.do_test(lambda: do_add_realm_filter(self.user_profile.realm, "#(?P<id>[123])",
"https://realm.com/my_realm_filter/%(id)s"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
        events = self.do_test(lambda: do_remove_realm_filter(self.user_profile.realm, "#(?P<id>[123])"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_realm_domain_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_domains')),
('op', equals('add')),
('realm_domain', check_dict_only([
('domain', check_string),
('allow_subdomains', check_bool),
])),
])
events = self.do_test(lambda: do_add_realm_domain(
self.user_profile.realm, 'zulip.org', False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = self.check_events_dict([
('type', equals('realm_domains')),
('op', equals('change')),
('realm_domain', check_dict_only([
('domain', equals('zulip.org')),
('allow_subdomains', equals(True)),
])),
])
test_domain = RealmDomain.objects.get(realm=self.user_profile.realm,
domain='zulip.org')
events = self.do_test(lambda: do_change_realm_domain(test_domain, True))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = self.check_events_dict([
('type', equals('realm_domains')),
('op', equals('remove')),
('domain', equals('zulip.org')),
])
events = self.do_test(lambda: do_remove_realm_domain(test_domain))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_create_bot(self) -> None:
def get_bot_created_checker(bot_type: str) -> Validator:
if bot_type == "GENERIC_BOT":
check_services = check_list(sub_validator=None, length=0)
elif bot_type == "OUTGOING_WEBHOOK_BOT":
check_services = check_list(check_dict_only([
('base_url', check_url),
('interface', check_int),
('token', check_string),
]), length=1)
elif bot_type == "EMBEDDED_BOT":
check_services = check_list(check_dict_only([
('service_name', check_string),
('config_data', check_dict(value_validator=check_string)),
]), length=1)
return self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('add')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
('bot_type', check_int),
('full_name', check_string),
('is_active', check_bool),
('api_key', check_string),
('default_sending_stream', check_none_or(check_string)),
('default_events_register_stream', check_none_or(check_string)),
('default_all_public_streams', check_bool),
('avatar_url', check_string),
('owner', check_string),
('services', check_services),
])),
])
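        # Creating a generic bot produces three events; the realm_bot 'add'
        # event (with an empty services list) is the second of them.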
action = lambda: self.create_bot('test')
events = self.do_test(action, num_events=3)
error = get_bot_created_checker(bot_type="GENERIC_BOT")('events[1]', events[1])
self.assert_on_error(error)
action = lambda: self.create_bot('test_outgoing_webhook',
full_name='Outgoing Webhook Bot',
payload_url=ujson.dumps('https://foo.bar.com'),
interface_type=Service.GENERIC,
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT)
events = self.do_test(action, num_events=3)
# The third event is the second call of notify_created_bot, which contains additional
# data for services (in contrast to the first call).
error = get_bot_created_checker(bot_type="OUTGOING_WEBHOOK_BOT")('events[2]', events[2])
self.assert_on_error(error)
action = lambda: self.create_bot('test_embedded',
full_name='Embedded Bot',
service_name='helloworld',
config_data=ujson.dumps({'foo': 'bar'}),
bot_type=UserProfile.EMBEDDED_BOT)
events = self.do_test(action, num_events=3)
error = get_bot_created_checker(bot_type="EMBEDDED_BOT")('events[2]', events[2])
self.assert_on_error(error)
def test_change_bot_full_name(self) -> None:
bot = self.create_bot('test')
action = lambda: do_change_full_name(bot, 'New Bot Name', self.user_profile)
events = self.do_test(action, num_events=2)
error = self.realm_bot_schema('full_name', check_string)('events[1]', events[1])
self.assert_on_error(error)
def test_regenerate_bot_api_key(self) -> None:
bot = self.create_bot('test')
action = lambda: do_regenerate_api_key(bot, self.user_profile)
events = self.do_test(action)
error = self.realm_bot_schema('api_key', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_avatar_source(self) -> None:
bot = self.create_bot('test')
action = lambda: do_change_avatar_fields(bot, bot.AVATAR_FROM_USER)
events = self.do_test(action, num_events=2)
error = self.realm_bot_schema('avatar_url', check_string)('events[0]', events[0])
self.assertEqual(events[1]['type'], 'realm_user')
self.assert_on_error(error)
def test_change_realm_icon_source(self) -> None:
action = lambda: do_change_icon_source(self.user_profile.realm, Realm.ICON_UPLOADED)
events = self.do_test(action, state_change_expected=True)
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update_dict')),
('property', equals('icon')),
('data', check_dict_only([
('icon_url', check_string),
('icon_source', check_string),
])),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_day_mode_logo_source(self) -> None:
action = lambda: do_change_logo_source(self.user_profile.realm, Realm.LOGO_UPLOADED, False)
events = self.do_test(action, state_change_expected=True)
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update_dict')),
('property', equals('logo')),
('data', check_dict_only([
('logo_url', check_string),
('logo_source', check_string),
])),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_night_mode_logo_source(self) -> None:
action = lambda: do_change_logo_source(self.user_profile.realm, Realm.LOGO_UPLOADED, True)
events = self.do_test(action, state_change_expected=True)
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update_dict')),
('property', equals('night_logo')),
('data', check_dict_only([
('night_logo_url', check_string),
('night_logo_source', check_string),
])),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_all_public_streams(self) -> None:
bot = self.create_bot('test')
action = lambda: do_change_default_all_public_streams(bot, True)
events = self.do_test(action)
error = self.realm_bot_schema('default_all_public_streams', check_bool)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_sending_stream(self) -> None:
bot = self.create_bot('test')
stream = get_stream("Rome", bot.realm)
action = lambda: do_change_default_sending_stream(bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_sending_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_change_default_sending_stream(bot, None)
events = self.do_test(action)
error = self.realm_bot_schema('default_sending_stream', equals(None))('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_events_register_stream(self) -> None:
bot = self.create_bot('test')
stream = get_stream("Rome", bot.realm)
action = lambda: do_change_default_events_register_stream(bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_events_register_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_change_default_events_register_stream(bot, None)
events = self.do_test(action)
error = self.realm_bot_schema('default_events_register_stream', equals(None))('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_owner(self) -> None:
change_bot_owner_checker_user = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('user_id', check_int),
('bot_owner_id', check_int),
])),
])
change_bot_owner_checker_bot = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
('owner_id', check_int),
])),
])
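        # Case 1: the acting user is an admin and keeps access to the bot, so
        # the client receives a realm_bot 'update' event plus a realm_user
        # update with the new bot_owner_id.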
self.user_profile = self.example_user('iago')
owner = self.example_user('hamlet')
bot = self.create_bot('test')
action = lambda: do_change_bot_owner(bot, owner, self.user_profile)
events = self.do_test(action, num_events=2)
error = change_bot_owner_checker_bot('events[0]', events[0])
self.assert_on_error(error)
error = change_bot_owner_checker_user('events[1]', events[1])
self.assert_on_error(error)
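        # Case 2: the acting user is not an admin and is no longer the owner,
        # so the bot disappears from their view via a realm_bot 'delete' event.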
change_bot_owner_checker_bot = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('delete')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
])),
])
self.user_profile = self.example_user('aaron')
owner = self.example_user('hamlet')
bot = self.create_bot('test1', full_name='Test1 Testerson')
action = lambda: do_change_bot_owner(bot, owner, self.user_profile)
events = self.do_test(action, num_events=2)
error = change_bot_owner_checker_bot('events[0]', events[0])
self.assert_on_error(error)
error = change_bot_owner_checker_user('events[1]', events[1])
self.assert_on_error(error)
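        # Case 3: the user becomes the new owner of a bot they previously had
        # no access to, so they receive a full realm_bot 'add' event for it.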
check_services = check_list(sub_validator=None, length=0)
change_bot_owner_checker_bot = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('add')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
('bot_type', check_int),
('full_name', check_string),
('is_active', check_bool),
('api_key', check_string),
('default_sending_stream', check_none_or(check_string)),
('default_events_register_stream', check_none_or(check_string)),
('default_all_public_streams', check_bool),
('avatar_url', check_string),
('owner', check_string),
('services', check_services),
])),
])
previous_owner = self.example_user('aaron')
self.user_profile = self.example_user('hamlet')
bot = self.create_test_bot('test2', previous_owner, full_name='Test2 Testerson')
action = lambda: do_change_bot_owner(bot, self.user_profile, previous_owner)
events = self.do_test(action, num_events=2)
error = change_bot_owner_checker_bot('events[0]', events[0])
self.assert_on_error(error)
error = change_bot_owner_checker_user('events[1]', events[1])
self.assert_on_error(error)
    def test_do_update_outgoing_webhook_service(self) -> None:
update_outgoing_webhook_service_checker = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
('services', check_list(check_dict_only([
('base_url', check_url),
('interface', check_int),
('token', check_string),
]))),
])),
])
self.user_profile = self.example_user('iago')
bot = self.create_test_bot('test', self.user_profile,
full_name='Test Bot',
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
payload_url=ujson.dumps('http://hostname.domain2.com'),
interface_type=Service.GENERIC,
)
action = lambda: do_update_outgoing_webhook_service(bot, 2, 'http://hostname.domain2.com')
events = self.do_test(action)
error = update_outgoing_webhook_service_checker('events[0]', events[0])
self.assert_on_error(error)
def test_do_deactivate_user(self) -> None:
bot_deactivate_checker = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('remove')),
('bot', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int),
])),
])
bot = self.create_bot('test')
action = lambda: do_deactivate_user(bot)
events = self.do_test(action, num_events=2)
error = bot_deactivate_checker('events[1]', events[1])
self.assert_on_error(error)
def test_do_reactivate_user(self) -> None:
bot_reactivate_checker = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('add')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
('bot_type', check_int),
('full_name', check_string),
('is_active', check_bool),
('api_key', check_string),
('default_sending_stream', check_none_or(check_string)),
('default_events_register_stream', check_none_or(check_string)),
('default_all_public_streams', check_bool),
('avatar_url', check_string),
('owner', check_none_or(check_string)),
('services', check_list(check_dict_only([
('base_url', check_url),
('interface', check_int),
]))),
])),
])
bot = self.create_bot('test')
do_deactivate_user(bot)
action = lambda: do_reactivate_user(bot)
events = self.do_test(action, num_events=2)
error = bot_reactivate_checker('events[1]', events[1])
self.assert_on_error(error)
def test_do_mark_hotspot_as_read(self) -> None:
self.user_profile.tutorial_status = UserProfile.TUTORIAL_WAITING
self.user_profile.save(update_fields=['tutorial_status'])
schema_checker = self.check_events_dict([
('type', equals('hotspots')),
('hotspots', check_list(check_dict_only([
('name', check_string),
('title', check_string),
('description', check_string),
('delay', check_float),
]))),
])
events = self.do_test(lambda: do_mark_hotspot_as_read(self.user_profile, 'intro_reply'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_rename_stream(self) -> None:
stream = self.make_stream('old_name')
new_name = u'stream with a brand new name'
self.subscribe(self.user_profile, stream.name)
notification = '<p><span class="user-mention silent" data-user-id="4">King Hamlet</span> renamed stream <strong>old_name</strong> to <strong>stream with a brand new name</strong>.</p>'
action = lambda: do_rename_stream(stream, new_name, self.user_profile)
events = self.do_test(action, num_events=3)
schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('email_address')),
('value', check_string),
('stream_id', check_int),
('name', equals('old_name')),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('name')),
('value', equals(new_name)),
('name', equals('old_name')),
('stream_id', check_int),
])
error = schema_checker('events[1]', events[1])
self.assert_on_error(error)
schema_checker = check_dict([
('flags', check_list(check_string)),
('type', equals('message')),
('message', check_dict([
('timestamp', check_int),
('content', equals(notification)),
('content_type', equals('text/html')),
('sender_email', equals('[email protected]')),
('sender_id', check_int),
('sender_short_name', equals('notification-bot')),
('display_recipient', equals(new_name)),
('id', check_int),
('stream_id', check_int),
('sender_realm_str', check_string),
('sender_full_name', equals('Notification Bot')),
('is_me_message', equals(False)),
('type', equals('stream')),
('submessages', check_list(check_string)),
(TOPIC_LINKS, check_list(check_url)),
('avatar_url', check_url),
('reactions', check_list(None)),
('client', equals('Internal')),
(TOPIC_NAME, equals('stream events')),
('recipient_id', check_int)
])),
('id', check_int)
])
error = schema_checker('events[2]', events[2])
self.assert_on_error(error)
def test_deactivate_stream_neversubscribed(self) -> None:
stream = self.make_stream('old_name')
action = lambda: do_deactivate_stream(stream)
events = self.do_test(action)
schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('delete')),
('streams', check_list(check_dict([]))),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_subscribe_other_user_never_subscribed(self) -> None:
action = lambda: self.subscribe(self.example_user("othello"), u"test_stream")
events = self.do_test(action, num_events=2)
peer_add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_add')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
error = peer_add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
@slow("Actually several tests combined together")
def test_subscribe_events(self) -> None:
self.do_test_subscribe_events(include_subscribers=True)
@slow("Actually several tests combined together")
def test_subscribe_events_no_include_subscribers(self) -> None:
self.do_test_subscribe_events(include_subscribers=False)
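    # Shared body for the two tests above: walks through subscribe,
    # peer_add/peer_remove, unsubscribe/vacate, resubscribe, and stream
    # property updates, validating every event schema with and without
    # subscriber lists included.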
def do_test_subscribe_events(self, include_subscribers: bool) -> None:
subscription_fields = [
('color', check_string),
('description', check_string),
('rendered_description', check_string),
('email_address', check_string),
('invite_only', check_bool),
('is_web_public', check_bool),
('is_announcement_only', check_bool),
('is_muted', check_bool),
('in_home_view', check_bool),
('name', check_string),
('audible_notifications', check_none_or(check_bool)),
('email_notifications', check_none_or(check_bool)),
('desktop_notifications', check_none_or(check_bool)),
('push_notifications', check_none_or(check_bool)),
('stream_id', check_int),
('first_message_id', check_none_or(check_int)),
('history_public_to_subscribers', check_bool),
('pin_to_top', check_bool),
('stream_weekly_traffic', check_none_or(check_int)),
('is_old_stream', check_bool),
]
if include_subscribers:
subscription_fields.append(('subscribers', check_list(check_int)))
subscription_schema_checker = check_list(
check_dict_only(subscription_fields),
)
stream_create_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('create')),
('streams', check_list(check_dict_only([
('name', check_string),
('stream_id', check_int),
('invite_only', check_bool),
('description', check_string),
('rendered_description', check_string),
]))),
])
add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('add')),
('subscriptions', subscription_schema_checker),
])
remove_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('remove')),
('subscriptions', check_list(
check_dict_only([
('name', equals('test_stream')),
('stream_id', check_int),
]),
)),
])
peer_add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_add')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
peer_remove_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_remove')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
stream_update_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('description')),
('value', check_string),
('rendered_description', check_string),
('stream_id', check_int),
('name', check_string),
])
stream_update_invite_only_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('invite_only')),
('stream_id', check_int),
('name', check_string),
('value', check_bool),
('history_public_to_subscribers', check_bool),
])
stream_update_is_announcement_only_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('is_announcement_only')),
('stream_id', check_int),
('name', check_string),
('value', check_bool),
])
# Subscribe to a totally new stream, so it's just Hamlet on it
action = lambda: self.subscribe(self.example_user("hamlet"), "test_stream") # type: Callable[[], object]
events = self.do_test(action, event_types=["subscription", "realm_user"],
include_subscribers=include_subscribers)
error = add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Add another user to that totally new stream
action = lambda: self.subscribe(self.example_user("othello"), "test_stream")
events = self.do_test(action,
include_subscribers=include_subscribers,
state_change_expected=include_subscribers,
)
error = peer_add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
stream = get_stream("test_stream", self.user_profile.realm)
        # Now remove othello, to test the normal unsubscribe flow (hamlet stays subscribed)
action = lambda: bulk_remove_subscriptions(
[self.example_user('othello')],
[stream],
get_client("website"))
events = self.do_test(action,
include_subscribers=include_subscribers,
state_change_expected=include_subscribers,
)
error = peer_remove_schema_checker('events[0]', events[0])
self.assert_on_error(error)
        # Now remove the last subscriber, hamlet, to test the 'vacate' event flow
action = lambda: bulk_remove_subscriptions(
[self.example_user('hamlet')],
[stream],
get_client("website"))
events = self.do_test(action,
include_subscribers=include_subscribers,
num_events=3)
error = remove_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Now resubscribe a user, to make sure that works on a vacated stream
action = lambda: self.subscribe(self.example_user("hamlet"), "test_stream")
events = self.do_test(action,
include_subscribers=include_subscribers,
num_events=2)
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
action = lambda: do_change_stream_description(stream, u'new description')
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Update stream privacy
action = lambda: do_change_stream_invite_only(stream, True, history_public_to_subscribers=True)
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_invite_only_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Update stream is_announcement_only property
action = lambda: do_change_stream_announcement_only(stream, True)
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_is_announcement_only_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Subscribe to a totally new invite-only stream, so it's just Hamlet on it
stream = self.make_stream("private", self.user_profile.realm, invite_only=True)
user_profile = self.example_user('hamlet')
action = lambda: bulk_add_subscriptions([stream], [user_profile])
events = self.do_test(action, include_subscribers=include_subscribers,
num_events=2)
        error = stream_create_schema_checker('events[0]', events[0])
        self.assert_on_error(error)
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
def test_do_delete_message_stream(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('delete_message')),
('message_id', check_int),
('sender', check_string),
('sender_id', check_int),
('message_type', equals("stream")),
('stream_id', check_int),
('topic', check_string),
])
msg_id = self.send_stream_message("[email protected]", "Verona")
message = Message.objects.get(id=msg_id)
events = self.do_test(
lambda: do_delete_messages(self.user_profile, [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_do_delete_message_personal(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('delete_message')),
('message_id', check_int),
('sender', check_string),
('sender_id', check_int),
('message_type', equals("private")),
('recipient_id', check_int),
])
msg_id = self.send_personal_message(
self.example_email("cordelia"),
self.user_profile.email,
"hello",
)
message = Message.objects.get(id=msg_id)
events = self.do_test(
lambda: do_delete_messages(self.user_profile, [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_do_delete_message_no_max_id(self) -> None:
        # Delete all historical messages for this user
        user_profile = self.example_user('hamlet')
UserMessage.objects.filter(user_profile=user_profile).delete()
msg_id = self.send_stream_message("[email protected]", "Verona")
message = Message.objects.get(id=msg_id)
self.do_test(
lambda: do_delete_messages(self.user_profile, [message]),
state_change_expected=True,
)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
self.assertEqual(result['max_message_id'], -1)
def test_add_attachment(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('attachment')),
('op', equals('add')),
('attachment', check_dict_only([
('id', check_int),
('name', check_string),
('size', check_int),
('path_id', check_string),
('create_time', check_float),
('messages', check_list(check_dict_only([
('id', check_int),
('name', check_float),
]))),
])),
('upload_space_used', equals(6)),
])
self.login(self.example_email("hamlet"))
fp = StringIO("zulip!")
fp.name = "zulip.txt"
data = {'uri': None}
def do_upload() -> None:
result = self.client_post("/json/user_uploads", {'file': fp})
self.assert_json_success(result)
self.assertIn("uri", result.json())
uri = result.json()["uri"]
base = '/user_uploads/'
self.assertEqual(base, uri[:len(base)])
data['uri'] = uri
events = self.do_test(
lambda: do_upload(),
num_events=1, state_change_expected=False)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Verify that the DB has the attachment marked as unclaimed
entry = Attachment.objects.get(file_name='zulip.txt')
self.assertEqual(entry.is_claimed(), False)
# Now we send an actual message using this attachment.
schema_checker = self.check_events_dict([
('type', equals('attachment')),
('op', equals('update')),
('attachment', check_dict_only([
('id', check_int),
('name', check_string),
('size', check_int),
('path_id', check_string),
('create_time', check_float),
('messages', check_list(check_dict_only([
('id', check_int),
('name', check_float),
]))),
])),
('upload_space_used', equals(6)),
])
self.subscribe(self.example_user("hamlet"), "Denmark")
body = "First message ...[zulip.txt](http://localhost:9991" + data['uri'] + ")"
events = self.do_test(
lambda: self.send_stream_message(self.example_email("hamlet"), "Denmark", body, "test"),
num_events=2)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Now remove the attachment
schema_checker = self.check_events_dict([
('type', equals('attachment')),
('op', equals('remove')),
('attachment', check_dict_only([
('id', check_int),
])),
('upload_space_used', equals(0)),
])
events = self.do_test(
lambda: self.client_delete("/json/attachments/%s" % (entry.id,)),
num_events=1, state_change_expected=False)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_notify_realm_export(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_export')),
('exports', check_list(check_dict_only([
('id', check_int),
('event_time', check_string),
('acting_user_id', check_int),
('extra_data', check_dict_only([
('export_path', check_string),
('deleted_timestamp', equals(None))
])),
])))
])
do_change_is_admin(self.user_profile, True)
self.login(self.user_profile.email)
with mock.patch('zerver.lib.export.do_export_realm',
return_value=create_dummy_file('test-export.tar.gz')):
events = self.do_test(
lambda: print(self.client_post('/json/export/realm').content),
state_change_expected=True, num_events=2)
# The first event is a message from notification-bot.
error = schema_checker('events[1]', events[1])
self.assert_on_error(error)
class FetchInitialStateDataTest(ZulipTestCase):
# Non-admin users don't have access to all bots
def test_realm_bots_non_admin(self) -> None:
user_profile = self.example_user('cordelia')
self.assertFalse(user_profile.is_realm_admin)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
self.assert_length(result['realm_bots'], 0)
# additionally the API key for a random bot is not present in the data
api_key = get_api_key(self.notification_bot())
self.assertNotIn(api_key, str(result))
# Admin users have access to all bots in the realm_bots field
def test_realm_bots_admin(self) -> None:
user_profile = self.example_user('hamlet')
do_change_is_admin(user_profile, True)
self.assertTrue(user_profile.is_realm_admin)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
self.assertTrue(len(result['realm_bots']) > 2)
def test_max_message_id_with_no_history(self) -> None:
user_profile = self.example_user('aaron')
# Delete all historical messages for this user
UserMessage.objects.filter(user_profile=user_profile).delete()
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
self.assertEqual(result['max_message_id'], -1)
class GetUnreadMsgsTest(ZulipTestCase):
def mute_stream(self, user_profile: UserProfile, stream: Stream) -> None:
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscription = Subscription.objects.get(
user_profile=user_profile,
recipient=recipient
)
subscription.is_muted = True
subscription.save()
def mute_topic(self, user_profile: UserProfile, stream_name: str,
topic_name: str) -> None:
realm = user_profile.realm
stream = get_stream(stream_name, realm)
recipient = get_stream_recipient(stream.id)
add_topic_mute(
user_profile=user_profile,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name=topic_name,
)
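    # Send 12 stream messages across four topics, mute one stream and one
    # topic, and verify that unmuted_stream_msgs excludes both while
    # stream_dict still tracks every unread message.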
def test_raw_unread_stream(self) -> None:
cordelia = self.example_user('cordelia')
hamlet = self.example_user('hamlet')
realm = hamlet.realm
for stream_name in ['social', 'devel', 'test here']:
self.subscribe(hamlet, stream_name)
self.subscribe(cordelia, stream_name)
all_message_ids = set() # type: Set[int]
message_ids = dict()
tups = [
('social', 'lunch'),
('test here', 'bla'),
('devel', 'python'),
('devel', 'ruby'),
]
for stream_name, topic_name in tups:
message_ids[topic_name] = [
self.send_stream_message(
sender_email=cordelia.email,
stream_name=stream_name,
topic_name=topic_name,
) for i in range(3)
]
all_message_ids |= set(message_ids[topic_name])
self.assertEqual(len(all_message_ids), 12) # sanity check on test setup
self.mute_stream(
user_profile=hamlet,
stream=get_stream('test here', realm),
)
self.mute_topic(
user_profile=hamlet,
stream_name='devel',
topic_name='ruby',
)
raw_unread_data = get_raw_unread_data(
user_profile=hamlet,
)
stream_dict = raw_unread_data['stream_dict']
self.assertEqual(
set(stream_dict.keys()),
all_message_ids,
)
self.assertEqual(
raw_unread_data['unmuted_stream_msgs'],
set(message_ids['python']) | set(message_ids['lunch']),
)
self.assertEqual(
stream_dict[message_ids['lunch'][0]],
dict(
sender_id=cordelia.id,
stream_id=get_stream('social', realm).id,
topic='lunch',
)
)
def test_raw_unread_huddle(self) -> None:
cordelia = self.example_user('cordelia')
othello = self.example_user('othello')
hamlet = self.example_user('hamlet')
prospero = self.example_user('prospero')
huddle1_message_ids = [
self.send_huddle_message(
cordelia.email,
[hamlet.email, othello.email]
)
for i in range(3)
]
huddle2_message_ids = [
self.send_huddle_message(
cordelia.email,
[hamlet.email, prospero.email]
)
for i in range(3)
]
raw_unread_data = get_raw_unread_data(
user_profile=hamlet,
)
huddle_dict = raw_unread_data['huddle_dict']
self.assertEqual(
set(huddle_dict.keys()),
set(huddle1_message_ids) | set(huddle2_message_ids)
)
huddle_string = ','.join(
str(uid)
for uid in sorted([cordelia.id, hamlet.id, othello.id])
)
self.assertEqual(
huddle_dict[huddle1_message_ids[0]],
dict(user_ids_string=huddle_string),
)
def test_raw_unread_personal(self) -> None:
cordelia = self.example_user('cordelia')
othello = self.example_user('othello')
hamlet = self.example_user('hamlet')
cordelia_pm_message_ids = [
self.send_personal_message(cordelia.email, hamlet.email)
for i in range(3)
]
othello_pm_message_ids = [
self.send_personal_message(othello.email, hamlet.email)
for i in range(3)
]
raw_unread_data = get_raw_unread_data(
user_profile=hamlet,
)
pm_dict = raw_unread_data['pm_dict']
self.assertEqual(
set(pm_dict.keys()),
set(cordelia_pm_message_ids) | set(othello_pm_message_ids)
)
self.assertEqual(
pm_dict[cordelia_pm_message_ids[0]],
dict(sender_id=cordelia.id),
)
def test_unread_msgs(self) -> None:
cordelia = self.example_user('cordelia')
sender_id = cordelia.id
sender_email = cordelia.email
user_profile = self.example_user('hamlet')
othello = self.example_user('othello')
# our tests rely on order
assert(sender_email < user_profile.email)
assert(user_profile.email < othello.email)
pm1_message_id = self.send_personal_message(sender_email, user_profile.email, "hello1")
pm2_message_id = self.send_personal_message(sender_email, user_profile.email, "hello2")
muted_stream = self.subscribe(user_profile, 'Muted Stream')
self.mute_stream(user_profile, muted_stream)
self.mute_topic(user_profile, 'Denmark', 'muted-topic')
stream_message_id = self.send_stream_message(sender_email, "Denmark", "hello")
muted_stream_message_id = self.send_stream_message(sender_email, "Muted Stream", "hello")
muted_topic_message_id = self.send_stream_message(
sender_email,
"Denmark",
topic_name="muted-topic",
content="hello",
)
huddle_message_id = self.send_huddle_message(
sender_email,
[user_profile.email, othello.email],
'hello3',
)
def get_unread_data() -> UnreadMessagesResult:
raw_unread_data = get_raw_unread_data(user_profile)
aggregated_data = aggregate_unread_data(raw_unread_data)
return aggregated_data
result = get_unread_data()
# The count here reflects the count of unread messages that we will
# report to users in the bankruptcy dialog, and for now it excludes unread messages
        # from muted streams, but it doesn't exclude unread messages from muted topics yet.
self.assertEqual(result['count'], 4)
unread_pm = result['pms'][0]
self.assertEqual(unread_pm['sender_id'], sender_id)
self.assertEqual(unread_pm['unread_message_ids'], [pm1_message_id, pm2_message_id])
self.assertTrue('sender_ids' not in unread_pm)
unread_stream = result['streams'][0]
self.assertEqual(unread_stream['stream_id'], get_stream('Denmark', user_profile.realm).id)
self.assertEqual(unread_stream['topic'], 'muted-topic')
self.assertEqual(unread_stream['unread_message_ids'], [muted_topic_message_id])
self.assertEqual(unread_stream['sender_ids'], [sender_id])
unread_stream = result['streams'][1]
self.assertEqual(unread_stream['stream_id'], get_stream('Denmark', user_profile.realm).id)
self.assertEqual(unread_stream['topic'], 'test')
self.assertEqual(unread_stream['unread_message_ids'], [stream_message_id])
self.assertEqual(unread_stream['sender_ids'], [sender_id])
unread_stream = result['streams'][2]
self.assertEqual(unread_stream['stream_id'], get_stream('Muted Stream', user_profile.realm).id)
self.assertEqual(unread_stream['topic'], 'test')
self.assertEqual(unread_stream['unread_message_ids'], [muted_stream_message_id])
self.assertEqual(unread_stream['sender_ids'], [sender_id])
huddle_string = ','.join(str(uid) for uid in sorted([sender_id, user_profile.id, othello.id]))
unread_huddle = result['huddles'][0]
self.assertEqual(unread_huddle['user_ids_string'], huddle_string)
self.assertEqual(unread_huddle['unread_message_ids'], [huddle_message_id])
self.assertTrue('sender_ids' not in unread_huddle)
self.assertEqual(result['mentions'], [])
um = UserMessage.objects.get(
user_profile_id=user_profile.id,
message_id=stream_message_id
)
um.flags |= UserMessage.flags.mentioned
um.save()
result = get_unread_data()
self.assertEqual(result['mentions'], [stream_message_id])
class ClientDescriptorsTest(ZulipTestCase):
def test_get_client_info_for_all_public_streams(self) -> None:
hamlet = self.example_user('hamlet')
realm = hamlet.realm
queue_data = dict(
all_public_streams=True,
apply_markdown=True,
client_gravatar=True,
client_type_name='website',
event_types=['message'],
last_connection_time=time.time(),
queue_timeout=0,
realm_id=realm.id,
user_profile_id=hamlet.id,
)
client = allocate_client_descriptor(queue_data)
message_event = dict(
realm_id=realm.id,
stream_name='whatever',
)
client_info = get_client_info_for_message_event(
message_event,
users=[],
)
self.assertEqual(len(client_info), 1)
dct = client_info[client.event_queue.id]
self.assertEqual(dct['client'].apply_markdown, True)
self.assertEqual(dct['client'].client_gravatar, True)
self.assertEqual(dct['client'].user_profile_id, hamlet.id)
self.assertEqual(dct['flags'], [])
self.assertEqual(dct['is_sender'], False)
message_event = dict(
realm_id=realm.id,
stream_name='whatever',
sender_queue_id=client.event_queue.id,
)
client_info = get_client_info_for_message_event(
message_event,
users=[],
)
dct = client_info[client.event_queue.id]
self.assertEqual(dct['is_sender'], True)
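    # For queues that do not follow all public streams, a client is included
    # only when its user id appears in the users list for the message, and any
    # per-user flags (e.g. 'mentioned') are passed through to the client info.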
def test_get_client_info_for_normal_users(self) -> None:
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
realm = hamlet.realm
def test_get_info(apply_markdown: bool, client_gravatar: bool) -> None:
clear_client_event_queues_for_testing()
queue_data = dict(
all_public_streams=False,
apply_markdown=apply_markdown,
client_gravatar=client_gravatar,
client_type_name='website',
event_types=['message'],
last_connection_time=time.time(),
queue_timeout=0,
realm_id=realm.id,
user_profile_id=hamlet.id,
)
client = allocate_client_descriptor(queue_data)
message_event = dict(
realm_id=realm.id,
stream_name='whatever',
)
client_info = get_client_info_for_message_event(
message_event,
users=[
dict(id=cordelia.id),
],
)
self.assertEqual(len(client_info), 0)
client_info = get_client_info_for_message_event(
message_event,
users=[
dict(id=cordelia.id),
dict(id=hamlet.id, flags=['mentioned']),
],
)
self.assertEqual(len(client_info), 1)
dct = client_info[client.event_queue.id]
self.assertEqual(dct['client'].apply_markdown, apply_markdown)
self.assertEqual(dct['client'].client_gravatar, client_gravatar)
self.assertEqual(dct['client'].user_profile_id, hamlet.id)
self.assertEqual(dct['flags'], ['mentioned'])
self.assertEqual(dct['is_sender'], False)
test_get_info(apply_markdown=False, client_gravatar=False)
test_get_info(apply_markdown=True, client_gravatar=False)
test_get_info(apply_markdown=False, client_gravatar=True)
test_get_info(apply_markdown=True, client_gravatar=True)
def test_process_message_event_with_mocked_client_info(self) -> None:
hamlet = self.example_user("hamlet")
class MockClient:
def __init__(self, user_profile_id: int,
apply_markdown: bool,
client_gravatar: bool) -> None:
self.user_profile_id = user_profile_id
self.apply_markdown = apply_markdown
self.client_gravatar = client_gravatar
self.client_type_name = 'whatever'
self.events = [] # type: List[Dict[str, Any]]
def accepts_messages(self) -> bool:
return True
def accepts_event(self, event: Dict[str, Any]) -> bool:
assert(event['type'] == 'message')
return True
def add_event(self, event: Dict[str, Any]) -> None:
self.events.append(event)
client1 = MockClient(
user_profile_id=hamlet.id,
apply_markdown=True,
client_gravatar=False,
)
client2 = MockClient(
user_profile_id=hamlet.id,
apply_markdown=False,
client_gravatar=False,
)
client3 = MockClient(
user_profile_id=hamlet.id,
apply_markdown=True,
client_gravatar=True,
)
client4 = MockClient(
user_profile_id=hamlet.id,
apply_markdown=False,
client_gravatar=True,
)
client_info = {
'client:1': dict(
client=client1,
flags=['starred'],
),
'client:2': dict(
client=client2,
flags=['has_alert_word'],
),
'client:3': dict(
client=client3,
flags=[],
),
'client:4': dict(
client=client4,
flags=[],
),
}
sender = hamlet
message_event = dict(
message_dict=dict(
id=999,
content='**hello**',
rendered_content='<b>hello</b>',
sender_id=sender.id,
type='stream',
client='website',
# NOTE: Some of these fields are clutter, but some
# will be useful when we let clients specify
# that they can compute their own gravatar URLs.
sender_email=sender.email,
sender_realm_id=sender.realm_id,
sender_avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
sender_avatar_version=1,
sender_is_mirror_dummy=None,
raw_display_recipient=None,
recipient_type=None,
recipient_type_id=None,
),
)
# Setting users to `[]` bypasses code we don't care about
# for this test--we assume client_info is correct in our mocks,
# and we are interested in how messages are put on event queue.
users = [] # type: List[Dict[str, Any]]
with mock.patch('zerver.tornado.event_queue.get_client_info_for_message_event',
return_value=client_info):
process_message_event(message_event, users)
# We are not closely examining avatar_url at this point, so
# just sanity check them and then delete the keys so that
# upcoming comparisons work.
for client in [client1, client2]:
message = client.events[0]['message']
self.assertIn('gravatar.com', message['avatar_url'])
message.pop('avatar_url')
self.assertEqual(client1.events, [
dict(
type='message',
message=dict(
type='stream',
sender_id=sender.id,
sender_email=sender.email,
id=999,
content='<b>hello</b>',
content_type='text/html',
client='website',
),
flags=['starred'],
),
])
self.assertEqual(client2.events, [
dict(
type='message',
message=dict(
type='stream',
sender_id=sender.id,
sender_email=sender.email,
id=999,
content='**hello**',
content_type='text/x-markdown',
client='website',
),
flags=['has_alert_word'],
),
])
self.assertEqual(client3.events, [
dict(
type='message',
message=dict(
type='stream',
sender_id=sender.id,
sender_email=sender.email,
avatar_url=None,
id=999,
content='<b>hello</b>',
content_type='text/html',
client='website',
),
flags=[],
),
])
self.assertEqual(client4.events, [
dict(
type='message',
message=dict(
type='stream',
sender_id=sender.id,
sender_email=sender.email,
avatar_url=None,
id=999,
content='**hello**',
content_type='text/x-markdown',
client='website',
),
flags=[],
),
])
class FetchQueriesTest(ZulipTestCase):
def test_queries(self) -> None:
user = self.example_user("hamlet")
self.login(user.email)
flush_per_request_caches()
with queries_captured() as queries:
with mock.patch('zerver.lib.events.always_want') as want_mock:
fetch_initial_state_data(
user_profile=user,
event_types=None,
queue_id='x',
client_gravatar=False,
)
self.assert_length(queries, 33)
expected_counts = dict(
alert_words=0,
custom_profile_fields=1,
default_streams=1,
default_stream_groups=1,
hotspots=0,
message=1,
muted_topics=1,
pointer=0,
presence=3,
realm=0,
realm_bot=1,
realm_domains=1,
realm_embedded_bots=0,
realm_emoji=1,
realm_filters=1,
realm_user=3,
realm_user_groups=2,
recent_private_conversations=2,
starred_messages=1,
stream=2,
stop_words=0,
subscription=6,
update_display_settings=0,
update_global_notifications=0,
update_message_flags=5,
user_status=1,
zulip_version=0,
)
wanted_event_types = {
item[0][0] for item
in want_mock.call_args_list
}
self.assertEqual(wanted_event_types, set(expected_counts))
for event_type in sorted(wanted_event_types):
count = expected_counts[event_type]
flush_per_request_caches()
with queries_captured() as queries:
if event_type == 'update_message_flags':
event_types = ['update_message_flags', 'message']
else:
event_types = [event_type]
fetch_initial_state_data(
user_profile=user,
event_types=event_types,
queue_id='x',
client_gravatar=False,
)
self.assert_length(queries, count)
class TestEventsRegisterAllPublicStreamsDefaults(ZulipTestCase):
def setUp(self) -> None:
self.user_profile = self.example_user('hamlet')
self.email = self.user_profile.email
def test_use_passed_all_public_true_default_false(self) -> None:
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, True)
self.assertTrue(result)
def test_use_passed_all_public_true_default(self) -> None:
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, True)
self.assertTrue(result)
def test_use_passed_all_public_false_default_false(self) -> None:
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, False)
self.assertFalse(result)
def test_use_passed_all_public_false_default_true(self) -> None:
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, False)
self.assertFalse(result)
def test_use_true_default_for_none(self) -> None:
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, None)
self.assertTrue(result)
def test_use_false_default_for_none(self) -> None:
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, None)
self.assertFalse(result)
class TestEventsRegisterNarrowDefaults(ZulipTestCase):
def setUp(self) -> None:
self.user_profile = self.example_user('hamlet')
self.email = self.user_profile.email
self.stream = get_stream('Verona', self.user_profile.realm)
def test_use_passed_narrow_no_default(self) -> None:
self.user_profile.default_events_register_stream_id = None
self.user_profile.save()
result = _default_narrow(self.user_profile, [[u'stream', u'my_stream']])
self.assertEqual(result, [[u'stream', u'my_stream']])
def test_use_passed_narrow_with_default(self) -> None:
self.user_profile.default_events_register_stream_id = self.stream.id
self.user_profile.save()
result = _default_narrow(self.user_profile, [[u'stream', u'my_stream']])
self.assertEqual(result, [[u'stream', u'my_stream']])
def test_use_default_if_narrow_is_empty(self) -> None:
self.user_profile.default_events_register_stream_id = self.stream.id
self.user_profile.save()
result = _default_narrow(self.user_profile, [])
self.assertEqual(result, [[u'stream', u'Verona']])
def test_use_narrow_if_default_is_none(self) -> None:
self.user_profile.default_events_register_stream_id = None
self.user_profile.save()
result = _default_narrow(self.user_profile, [])
self.assertEqual(result, [])
class TestGetRawUserDataSystemBotRealm(ZulipTestCase):
def test_get_raw_user_data_on_system_bot_realm(self) -> None:
result = get_raw_user_data(get_realm("zulipinternal"), True)
for bot_email in settings.CROSS_REALM_BOT_EMAILS:
bot_profile = get_system_bot(bot_email)
self.assertTrue(bot_profile.id in result)
self.assertTrue(result[bot_profile.id]['is_cross_realm_bot'])
| apache-2.0 | 131,887,794,665,977,280 | 40.921692 | 192 | 0.543304 | false |
crossgovernmentservices/csd-notes | app/blueprints/sso/views.py | 1 | 1740 | # -*- coding: utf-8 -*-
"""
Single Sign-On views
"""
from urllib.parse import unquote, urlparse, urlunparse
from flask import (
Blueprint,
redirect,
request,
session,
url_for
)
from flask_security.utils import login_user, logout_user
from app.extensions import (
user_datastore,
oidc
)
sso = Blueprint('sso', __name__)
def sanitize_url(url):
if url:
parts = list(urlparse(url))
parts[0] = ''
parts[1] = ''
parts[3] = ''
url = urlunparse(parts[:6])
return url
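# Illustrative sketch (not part of the original blueprint): sanitize_url() strips the
# scheme, host and params from an attacker-supplied ?next= value, so redirects can
# only point back into this application. The URL below is hypothetical.
def _example_sanitize_url():
    relative = sanitize_url('https://evil.example/admin?x=1')
    assert relative == '/admin?x=1'
    return relative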
@sso.route('/login')
def login():
"login redirects to Dex for SSO login/registration"
next_url = sanitize_url(unquote(request.args.get('next', '')))
if next_url:
session['next_url'] = next_url
return redirect(oidc.login('dex'))
@sso.route('/logout')
def logout():
logout_user()
return redirect(url_for('base.index'))
@sso.route('/callback')
@oidc.callback
def oidc_callback():
user_info = oidc.authenticate('dex', request)
user = user_datastore.get_user(user_info['email'])
if not user:
user = create_user(user_info)
login_user(user)
next_url = url_for('base.index')
if 'next_url' in session:
next_url = session['next_url']
del session['next_url']
return redirect(next_url)
def create_user(user_info):
email = user_info['email']
name = user_info.get('nickname', user_info.get('name'))
user = add_role('USER', user_datastore.create_user(
email=email,
full_name=name))
user_datastore.commit()
return user
def add_role(role, user):
user_role = user_datastore.find_or_create_role(role)
user_datastore.add_role_to_user(user, user_role)
return user
| mit | 8,933,051,197,988,040,000 | 17.913043 | 66 | 0.616667 | false |
mitsuhiko/sentry | src/sentry/web/frontend/admin.py | 1 | 10551 | """
sentry.web.frontend.admin
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import functools
import logging
import sys
import uuid
from collections import defaultdict
import pkg_resources
import six
from django.conf import settings
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Count
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_protect
from sentry import options
from sentry.app import env
from sentry.models import Project, Team, User
from sentry.plugins import plugins
from sentry.utils.email import send_mail
from sentry.utils.http import absolute_uri
from sentry.utils.warnings import DeprecatedSettingWarning, seen_warnings
from sentry.web.decorators import requires_admin
from sentry.web.forms import (
ChangeUserForm, NewUserForm, RemoveUserForm, TestEmailForm
)
from sentry.web.helpers import render_to_response, render_to_string
def configure_plugin(request, slug):
plugin = plugins.get(slug)
if not plugin.has_site_conf():
return HttpResponseRedirect(reverse('sentry'))
view = plugin.configure(request=request)
if isinstance(view, HttpResponse):
return view
return render_to_response('sentry/admin/plugins/configure.html', {
'plugin': plugin,
'title': plugin.get_conf_title(),
'slug': plugin.slug,
'view': view,
}, request)
@requires_admin
def manage_projects(request):
project_list = Project.objects.filter(
status=0,
team__isnull=False,
).select_related('team')
project_query = request.GET.get('pquery')
if project_query:
project_list = project_list.filter(name__icontains=project_query)
sort = request.GET.get('sort')
if sort not in ('name', 'date'):
sort = 'date'
if sort == 'date':
order_by = '-date_added'
elif sort == 'name':
order_by = 'name'
project_list = project_list.order_by(order_by)
context = {
'project_list': project_list,
'project_query': project_query,
'sort': sort,
}
return render_to_response('sentry/admin/projects/list.html', context, request)
@requires_admin
def manage_users(request):
user_list = User.objects.all().order_by('-date_joined')
user_query = request.GET.get('uquery')
if user_query:
user_list = user_list.filter(email__icontains=user_query)
sort = request.GET.get('sort')
if sort not in ('name', 'joined', 'login'):
sort = 'joined'
if sort == 'joined':
order_by = '-date_joined'
elif sort == 'login':
order_by = '-last_login'
elif sort == 'name':
order_by = 'name'
user_list = user_list.order_by(order_by)
return render_to_response('sentry/admin/users/list.html', {
'user_list': user_list,
'user_query': user_query,
'sort': sort,
}, request)
@requires_admin
@transaction.atomic
@csrf_protect
def create_new_user(request):
if not request.is_superuser():
return HttpResponseRedirect(reverse('sentry'))
form = NewUserForm(request.POST or None, initial={
'send_welcome_mail': True,
'create_project': True,
})
if form.is_valid():
user = form.save(commit=False)
# create a random password
password = uuid.uuid4().hex
user.set_password(password)
user.save()
if form.cleaned_data['send_welcome_mail']:
context = {
'username': user.username,
'password': password,
'url': absolute_uri(reverse('sentry')),
}
body = render_to_string('sentry/emails/welcome_mail.txt', context, request)
try:
send_mail(
'%s Welcome to Sentry' % (options.get('mail.subject-prefix'),),
body, options.get('mail.from'), [user.email],
fail_silently=False
)
except Exception as e:
logger = logging.getLogger('sentry.mail.errors')
logger.exception(e)
return HttpResponseRedirect(reverse('sentry-admin-users'))
context = {
'form': form,
}
context.update(csrf(request))
return render_to_response('sentry/admin/users/new.html', context, request)
@requires_admin
@csrf_protect
def edit_user(request, user_id):
if not request.is_superuser():
return HttpResponseRedirect(reverse('sentry'))
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
return HttpResponseRedirect(reverse('sentry-admin-users'))
form = ChangeUserForm(request.POST or None, instance=user)
if form.is_valid():
user = form.save()
return HttpResponseRedirect(reverse('sentry-admin-users'))
project_list = Project.objects.filter(
status=0,
organization__member_set__user=user,
).order_by('-date_added')
context = {
'form': form,
'the_user': user,
'project_list': project_list,
}
context.update(csrf(request))
return render_to_response('sentry/admin/users/edit.html', context, request)
@requires_admin
@csrf_protect
def remove_user(request, user_id):
if str(user_id) == str(request.user.id):
return HttpResponseRedirect(reverse('sentry-admin-users'))
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
return HttpResponseRedirect(reverse('sentry-admin-users'))
form = RemoveUserForm(request.POST or None)
if form.is_valid():
if form.cleaned_data['removal_type'] == '2':
user.delete()
else:
User.objects.filter(pk=user.pk).update(is_active=False)
return HttpResponseRedirect(reverse('sentry-admin-users'))
context = csrf(request)
context.update({
'form': form,
'the_user': user,
})
return render_to_response('sentry/admin/users/remove.html', context, request)
@requires_admin
def list_user_projects(request, user_id):
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
return HttpResponseRedirect(reverse('sentry-admin-users'))
project_list = Project.objects.filter(
status=0,
organization__member_set__user=user,
).order_by('-date_added')
context = {
'project_list': project_list,
'the_user': user,
}
return render_to_response('sentry/admin/users/list_projects.html', context, request)
@requires_admin
def manage_teams(request):
team_list = Team.objects.order_by('-date_added')
team_query = request.GET.get('tquery')
if team_query:
team_list = team_list.filter(name__icontains=team_query)
sort = request.GET.get('sort')
if sort not in ('name', 'date', 'events'):
sort = 'date'
if sort == 'date':
order_by = '-date_added'
elif sort == 'name':
order_by = 'name'
elif sort == 'projects':
order_by = '-num_projects'
team_list = team_list.annotate(
num_projects=Count('project'),
).order_by(order_by)
return render_to_response('sentry/admin/teams/list.html', {
'team_list': team_list,
'team_query': team_query,
'sort': sort,
}, request)
@requires_admin
def status_env(request):
reserved = ('PASSWORD', 'SECRET', 'KEY')
config = []
for k in sorted(dir(settings)):
v_repr = repr(getattr(settings, k))
if any(r.lower() in v_repr.lower() for r in reserved):
v_repr = '*' * 16
if any(r in k for r in reserved):
v_repr = '*' * 16
if k.startswith('_'):
continue
if k.upper() != k:
continue
config.append((k, v_repr))
return render_to_response('sentry/admin/status/env.html', {
'python_version': sys.version,
'config': config,
'environment': env.data,
}, request)
@requires_admin
def status_packages(request):
config = []
for k in sorted(dir(settings)):
if k == 'KEY':
continue
if k.startswith('_'):
continue
if k.upper() != k:
continue
config.append((k, getattr(settings, k)))
return render_to_response('sentry/admin/status/packages.html', {
'modules': sorted([(p.project_name, p.version) for p in pkg_resources.working_set]),
'extensions': [
(p.get_title(), '%s.%s' % (p.__module__, p.__class__.__name__))
for p in plugins.all(version=None)
],
}, request)
@requires_admin
def status_warnings(request):
groupings = {
DeprecatedSettingWarning: 'Deprecated Settings',
}
groups = defaultdict(list)
warnings = []
for warning in seen_warnings:
cls = type(warning)
if cls in groupings:
groups[cls].append(warning)
else:
warnings.append(warning)
sort_by_message = functools.partial(sorted, key=str)
return render_to_response(
'sentry/admin/status/warnings.html',
{
'groups': [(groupings[key], sort_by_message(values)) for key, values in groups.items()],
'warnings': sort_by_message(warnings),
},
request,
)
@requires_admin
@csrf_protect
def status_mail(request):
form = TestEmailForm(request.POST or None)
if form.is_valid():
body = """This email was sent as a request to test the Sentry outbound email configuration."""
try:
send_mail(
'%s Test Email' % (options.get('mail.subject-prefix'),),
body, options.get('mail.from'), [request.user.email],
fail_silently=False
)
except Exception as e:
form.errors['__all__'] = [six.text_type(e)]
return render_to_response('sentry/admin/status/mail.html', {
'form': form,
'mail_host': options.get('mail.host'),
'mail_password': bool(options.get('mail.password')),
'mail_username': options.get('mail.username'),
'mail_port': options.get('mail.port'),
'mail_use_tls': options.get('mail.use-tls'),
'mail_from': options.get('mail.from'),
'mail_list_namespace': options.get('mail.list-namespace'),
}, request)
| bsd-3-clause | -4,158,694,550,379,904,500 | 27.516216 | 102 | 0.610937 | false |
bkabrda/anymarkup-core | test/test_parse.py | 1 | 7564 | # -*- coding: utf-8 -*-
from datetime import datetime
import io
import os
import pytest
import six
import toml
from anymarkup_core import *
from test import *
class TestParse(object):
fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
def assert_unicode(self, struct):
if isinstance(struct, dict):
for k, v in struct.items():
self.assert_unicode(k)
self.assert_unicode(v)
elif isinstance(struct, list):
for i in struct:
self.assert_unicode(i)
elif isinstance(struct, (six.string_types, type(None), type(True), \
six.integer_types, float, datetime)):
pass
else:
raise AssertionError('Unexpected type {0} in parsed structure'.format(type(struct)))
@pytest.mark.parametrize(('str', 'fmt', 'expected'), [
('', None, {}),
('{}', None, {}),
('[]', None, []),
(example_ini, None, example_as_dict),
(example_json, None, example_as_dict),
(example_json5, 'json5', example_as_dict),
(example_toml, 'toml', toml_example_as_dict), # we can't tell toml from ini
(example_xml, None, example_as_ordered_dict),
(example_yaml_map, None, example_as_dict),
(example_yaml_omap, None, example_as_ordered_dict),
])
def test_parse_basic(self, str, fmt, expected):
parsed = parse(str, fmt)
assert parsed == expected
assert type(parsed) == type(expected)
self.assert_unicode(parsed)
@pytest.mark.parametrize(('str', 'fmt', 'expected'), [
('', None, {}),
('{}', None, {}),
('[]', None, []),
(example_ini, None, example_as_dict),
(example_json, None, example_as_dict),
(example_json5, 'json5', example_as_dict),
(example_toml, 'toml', toml_example_as_dict), # we can't tell toml from ini
(example_xml, None, example_as_ordered_dict),
(example_yaml_map, None, example_as_dict),
(example_yaml_omap, None, example_as_ordered_dict),
])
def test_parse_basic_interpolation_is_false(self, str, fmt, expected):
parsed = parse(str, fmt, interpolate=False)
assert parsed == expected
assert type(parsed) == type(expected)
self.assert_unicode(parsed)
def test_parse_interpolation_fail(self):
with pytest.raises(AnyMarkupError):
parse(example_ini_with_interpolation)
def test_parse_interpolation_pass_when_false(self):
parsed = parse(example_ini_with_interpolation, interpolate=False)
assert type(parsed) == dict
@pytest.mark.parametrize(('str', 'expected'), [
('# comment', {}),
('# comment\n', {}),
('# comment\n' + example_ini, example_as_dict),
('# comment\n' + example_json, example_as_dict),
('# comment\n' + example_json5, example_as_dict),
('# comment\n' + example_yaml_map, example_as_dict),
('# comment\n' + example_yaml_omap, example_as_ordered_dict),
# no test for toml, since it's not auto-recognized
])
def test_parse_recognizes_comments_in_ini_json_yaml(self, str, expected):
parsed = parse(str)
assert parsed == expected
assert type(parsed) == type(expected)
self.assert_unicode(parsed)
@pytest.mark.parametrize(('str, fmt, expected'), [
(types_ini, None, types_as_struct_with_objects),
(types_json, None, types_as_struct_with_objects),
(types_json5, 'json5', types_as_struct_with_objects),
(types_toml, 'toml', toml_types_as_struct_with_objects),
(types_xml, None, types_as_struct_with_objects),
(types_yaml, None, types_as_struct_with_objects),
])
def test_parse_force_types_true(self, str, fmt, expected):
assert parse(str, fmt) == expected
@pytest.mark.parametrize(('str', 'fmt', 'expected'), [
(types_ini, None, types_as_struct_with_strings),
(types_json, None, types_as_struct_with_strings),
(types_json5, 'json5', types_as_struct_with_strings),
(types_toml, 'toml', toml_types_as_struct_with_strings),
(types_xml, None, types_as_struct_with_strings),
(types_yaml, None, types_as_struct_with_strings),
])
def test_parse_force_types_false(self, str, fmt, expected):
assert parse(str, fmt, force_types=False) == expected
@pytest.mark.parametrize(('str', 'fmt', 'expected'), [
# Note: the expected result is backend-specific
(types_ini, None, {'x': {'a': '1', 'b': '1.1', 'c': 'None', 'd': 'True'}}),
(types_json, None, {'x': {'a': 1, 'b': 1.1, 'c': None, 'd': True}}),
(types_json5, 'json5', {'x': {'a': 1, 'b': 1.1, 'c': None, 'd': True}}),
(types_toml, 'toml', {'x': {'a': 1, 'b': 1.1,
'c': datetime(1987, 7, 5, 17, 45, tzinfo=TomlTz('Z')),
'd': True}}),
(types_xml, None, {'x': {'a': '1', 'b': '1.1', 'c': 'None', 'd': 'True'}}),
(types_yaml, None, {'x': {'a': 1, 'b': 1.1, 'c': 'None', 'd': True}}),
])
def test_parse_force_types_none(self, str, fmt, expected):
assert parse(str, fmt, force_types=None) == expected
def test_parse_works_with_bytes_yielding_file(self):
f = open(os.path.join(self.fixtures, 'empty.ini'), 'rb')
parsed = parse(f)
assert parsed == {}
def test_parse_works_with_unicode_yielding_file(self):
# on Python 2, this can only be simulated with io.open
f = io.open(os.path.join(self.fixtures, 'empty.ini'), encoding='utf-8')
parsed = parse(f)
assert parsed == {}
def test_parse_fails_on_wrong_format(self):
with pytest.raises(AnyMarkupError):
parse('foo: bar', format='xml')
@pytest.mark.parametrize(('file', 'expected'), [
# TODO: some parsers allow empty files, others don't - this should be made consistent
('empty.ini', {}),
('empty.json', AnyMarkupError),
('empty.json5', AnyMarkupError),
('empty.toml', {}),
('empty.xml', AnyMarkupError),
('empty.yaml', {}),
('example.ini', example_as_dict),
('example.json', example_as_dict),
('example.json5', example_as_dict),
('example.toml', toml_example_as_dict),
('example.xml', example_as_ordered_dict),
('example.yaml', example_as_dict),
])
def test_parse_file_basic(self, file, expected):
f = os.path.join(self.fixtures, file)
if expected == AnyMarkupError:
with pytest.raises(AnyMarkupError):
parse_file(f)
else:
parsed = parse_file(f)
assert parsed == expected
self.assert_unicode(parsed)
def test_parse_file_noextension(self):
parsed = parse_file(os.path.join(self.fixtures, 'without_extension'))
assert parsed == example_as_dict
self.assert_unicode(parsed)
def test_parse_file_fails_on_bad_extension(self):
with pytest.raises(AnyMarkupError):
parse_file(os.path.join(self.fixtures, 'bad_extension.xml'))
def test_parse_file_respects_force_types(self):
f = os.path.join(self.fixtures, 'types.json')
parsed = parse_file(f, force_types=True)
assert parsed == {'a': 1, 'b': 1}
parsed = parse_file(f, force_types=False)
assert parsed == {'a': '1', 'b': '1'}
parsed = parse_file(f, force_types=None)
assert parsed == {'a': 1, 'b': '1'}
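# Usage sketch (illustrative only, not part of the original test suite): how the
# force_types flag changes what parse() returns for the same JSON document; the
# expected values mirror test_parse_file_respects_force_types above.
def _example_force_types():
    doc = '{"a": 1, "b": "1"}'
    assert parse(doc, force_types=True) == {'a': 1, 'b': 1}
    assert parse(doc, force_types=False) == {'a': '1', 'b': '1'}
    assert parse(doc, force_types=None) == {'a': 1, 'b': '1'}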
| bsd-3-clause | 6,279,732,919,536,762,000 | 39.886486 | 96 | 0.573506 | false |
westial/restfulcomm | restfulcomm/servers/superserver.py | 1 | 1705 | # -*- coding: utf-8 -*-
"""Super server class"""
from abc import ABCMeta, abstractmethod
from werkzeug.wrappers import Response
class CommServer(metaclass=ABCMeta):
@abstractmethod
def __init__(self, server_resources, configuration):
"""Configures the resource for exit
Args:
            server_resources: list of ServerResource class definitions
        Keyword args:
            configuration: Config object with the server configuration
"""
self._resources = server_resources
self._configuration = configuration
@abstractmethod
def _create_rules(self):
"""Creates and configures the rules for the server resources given
on instance construction"""
pass
@abstractmethod
def _dispatch_request(self, **kwargs):
"""Runs the command action by the given resource endpoint"""
pass
@abstractmethod
def is_listening(self):
pass
@abstractmethod
def listen(self):
"""Request the given content to server and return the response"""
pass
@abstractmethod
def stop(self):
"""Shutdown server"""
pass
@abstractmethod
def reset(self):
"""Clean and shutdown server"""
pass
# Predefined responses. They can be overwritten by implementation
@classmethod
def not_found(cls):
"""Returns a not found http response
:return: HTTP Response
"""
return Response('Not Found', status=404)
@classmethod
def method_not_allowed(cls):
"""Returns a method not allowed http response
:return: HTTP Response
"""
return Response('Method not allowed', status=405)
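# Minimal concrete-server sketch (illustrative only, not part of the original package):
# it shows which abstract members a real transport would have to implement; the
# bodies below are placeholders rather than a working transport.
class _EchoCommServer(CommServer):
    def __init__(self, server_resources, configuration):
        super(_EchoCommServer, self).__init__(server_resources, configuration)
        self._listening = False
        self._create_rules()
    def _create_rules(self):
        # a real server would map each ServerResource to a routing rule here
        self._rules = list(self._resources)
    def _dispatch_request(self, **kwargs):
        # a real server would look up the endpoint and run the resource command
        return Response('OK', status=200)
    def is_listening(self):
        return self._listening
    def listen(self):
        self._listening = True
    def stop(self):
        self._listening = False
    def reset(self):
        self.stop()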
| gpl-3.0 | -7,137,966,051,872,183,000 | 24.447761 | 74 | 0.628739 | false |
CFIS-Octarine/octarine | planning/ObsStatus.py | 1 | 9227 | from __future__ import absolute_import
import argparse
import logging
import math
import sys
import tempfile
import time
import ephem
import matplotlib
import requests
from astropy.io.votable import parse
matplotlib.use('Agg')
from matplotlib.pyplot import figure, close
from matplotlib.patches import Rectangle
from matplotlib.backends.backend_pdf import PdfPages
from src.daomop import (storage)
from src.planning import parameters
dbimages = 'vos:jkavelaars/CFIS/dbimages'
storage.DBIMAGES = dbimages
parameters.RUNIDS = ['17AP30', '17AP31']
def query_for_observations(mjd, observable, runids):
"""Do a QUERY on the TAP service for all observations that are part of runid,
where taken after mjd and have calibration 'observable'.
Schema is at: http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/tap/tables
mjd : float
observable: str ( 2 or 1 )
runid: tuple eg. ('13AP05', '13AP06')
"""
data = {"QUERY": ("SELECT Observation.target_name as TargetName, "
"COORD1(CENTROID(Plane.position_bounds)) AS RA,"
"COORD2(CENTROID(Plane.position_bounds)) AS DEC, "
"Plane.time_bounds_lower AS StartDate, "
"Plane.time_exposure AS ExposureTime, "
"Observation.instrument_name AS Instrument, "
"Plane.energy_bandpassName AS Filter, "
"Observation.observationID AS dataset_name, "
"Observation.proposal_id AS ProposalID, "
"Observation.proposal_pi AS PI "
"FROM caom2.Observation AS Observation "
"JOIN caom2.Plane AS Plane ON "
"Observation.obsID = Plane.obsID "
"WHERE ( Observation.collection = 'CFHT' ) "
"AND Plane.time_bounds_lower > %d "
"AND Plane.calibrationLevel=%s "
"AND Observation.proposal_id IN %s " ) %
( mjd, observable, str(runids)),
"REQUEST": "doQuery",
"LANG": "ADQL",
"FORMAT": "votable"}
result = requests.get(storage.TAP_WEB_SERVICE, params=data, verify=False)
assert isinstance(result, requests.Response)
logging.debug("Doing TAP Query using url: %s" % (str(result.url)))
tmpFile = tempfile.NamedTemporaryFile()
with open(tmpFile.name, 'w') as outfile:
outfile.write(result.text)
try:
vot = parse(tmpFile.name).get_first_table()
except:
        print(result.text)
raise
vot.array.sort(order='StartDate')
t = vot.array
tmpFile.close()
logging.debug("Got {} lines from tap query".format(len(t)))
return t
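# Usage sketch (illustrative only, not part of the original script): the MJD cut-off
# and run IDs below are hypothetical; query_for_observations() returns the votable
# array sorted by StartDate.
def _example_query_recent_observations():
    mjd_start = 57000.0                  # hypothetical Modified Julian Date cut-off
    runids = ('17AP30', '17AP31')        # must be a tuple; it is substituted into the ADQL IN clause
    return query_for_observations(mjd_start, 1, runids)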
def create_ascii_table(obsTable, outfile):
"""Given a table of observations create an ascii log file for easy parsing.
Store the result in outfile (could/should be a vospace dataNode)
obsTable: astropy.votable.array object
    outfile: str (name of the vospace dataNode to store the result to)
"""
logging.info("writing text log to %s" % ( outfile))
stamp = "#\n# Last Updated: " + time.asctime() + "\n#\n"
header = "| %20s | %20s | %20s | %20s | %20s | %20s | %20s |\n" % (
"EXPNUM", "OBS-DATE", "FIELD", "EXPTIME(s)", "RA", "DEC", "RUNID")
bar = "=" * (len(header) - 1) + "\n"
if outfile[0:4] == "vos:":
tmpFile = tempfile.NamedTemporaryFile(suffix='.txt')
fout = tmpFile
else:
fout = open(outfile, 'w')
t2 = None
fout.write(bar + stamp + bar + header)
populated = storage.list_dbimages(dbimages=dbimages)
for i in range(len(obsTable) - 1, -1, -1):
row = obsTable.data[i]
if row['dataset_name'] not in populated:
storage.populate(row['dataset_name'])
sDate = str(ephem.date(row.StartDate +
2400000.5 -
ephem.julian_date(ephem.date(0))))[:20]
t1 = time.strptime(sDate, "%Y/%m/%d %H:%M:%S")
if t2 is None or math.fabs(time.mktime(t2) - time.mktime(t1)) > 3 * 3600.0:
fout.write(bar)
t2 = t1
ra = str(ephem.hours(math.radians(row.RA)))
dec = str(ephem.degrees(math.radians(row.DEC)))
line = "| %20s | %20s | %20s | %20.1f | %20s | %20s | %20s |\n" % (
str(row.dataset_name),
str(ephem.date(row.StartDate + 2400000.5 -
ephem.julian_date(ephem.date(0))))[:20],
row.TargetName[:20],
row.ExposureTime, ra[:20], dec[:20], row.ProposalID[:20] )
fout.write(line)
fout.write(bar)
if outfile[0:4] == "vos:":
fout.flush()
storage.copy(tmpFile.name, outfile)
fout.close()
return
def create_sky_plot(obstable, outfile, night_count=1, stack=True):
"""Given a VOTable that describes the observation coverage provide a PDF of the skycoverge.
obstable: vostable.arrary
stack: BOOL (true: stack all the observations in a series of plots)
"""
# camera dimensions
width = 0.98
height = 0.98
if outfile[0:4] == 'vos:':
tmpFile = tempfile.NamedTemporaryFile(suffix='.pdf')
pdf = PdfPages(tmpFile.name)
else:
pdf = PdfPages(outfile)
saturn = ephem.Saturn()
uranus = ephem.Uranus()
t2 = None
fig = None
proposalID = None
limits = {'13A': ( 245, 200, -20, 0),
'13B': ( 0, 45, 0, 20)}
for row in reversed(obstable.data):
date = ephem.date(row.StartDate + 2400000.5 - ephem.julian_date(ephem.date(0)))
sDate = str(date)
# Saturn only a problem in 2013A fields
saturn.compute(date)
sra = math.degrees(saturn.ra)
sdec = math.degrees(saturn.dec)
uranus.compute(date)
ura = math.degrees(uranus.ra)
udec = math.degrees(uranus.dec)
t1 = time.strptime(sDate, "%Y/%m/%d %H:%M:%S")
if t2 is None or (math.fabs(time.mktime(t2) - time.mktime(
t1)) > 3 * 3600.0 and opt.stack) or proposalID is None or proposalID != row.ProposalID:
if fig is not None:
pdf.savefig()
close()
proposalID = row.ProposalID
fig = figure(figsize=(7, 2))
ax = fig.add_subplot(111, aspect='equal')
ax.set_title("Data taken on %s-%s-%s" % ( t1.tm_year, t1.tm_mon, t1.tm_mday), fontdict={'fontsize': 8})
ax.axis(limits.get(row.ProposalID[0:3], (0, 20, 0, 20))) # appropriate only for 2013A fields
ax.grid()
ax.set_xlabel("RA (deg)", fontdict={'fontsize': 8})
ax.set_ylabel("DEC (deg)", fontdict={'fontsize': 8})
t2 = t1
ra = row.RA - width / 2.0
dec = row.DEC - height / 2.0
color = 'b'
if 'W' in row['TargetName']:
color = 'g'
ax.add_artist(Rectangle(xy=(ra, dec), height=height, width=width,
edgecolor=color, facecolor=color,
lw=0.5, fill='g', alpha=0.33))
ax.add_artist(Rectangle(xy=(sra, sdec), height=0.3, width=0.3,
edgecolor='r',
facecolor='r',
lw=0.5, fill='k', alpha=0.33))
ax.add_artist(Rectangle(xy=(ura, udec), height=0.3, width=0.3,
edgecolor='b',
facecolor='b',
lw=0.5, fill='b', alpha=0.33))
if ax is not None:
ax.axis((270, 215, -20, 0))
pdf.savefig()
close()
pdf.close()
if outfile[0:4] == "vos:":
storage.copy(tmpFile.name, outfile)
tmpFile.close()
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Query the CADC for OSSOS observations.")
parser.add_argument('date', nargs='?', action='store',
default=parameters.SURVEY_START)
parser.add_argument('--runid', nargs='*', action='store',
default=parameters.RUNIDS)
parser.add_argument('--cal', action='store', default=1)
parser.add_argument('--outfile', action='store',
default='vos:OSSOS/ObservingStatus/obsList')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--stack', action='store_true', default=False,
help=( "Make single status plot that stacks"
" data accross multiple nights, instead of nightly sub-plots." ))
opt = parser.parse_args()
runids = tuple(opt.runid)
if opt.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
try:
mjd_yesterday = ephem.date(ephem.julian_date(ephem.date(opt.date))) - 2400000.5
except Exception as e:
logging.error("you said date = %s" % (opt.date))
logging.error(str(e))
sys.exit(-1)
obs_table = query_for_observations(mjd_yesterday, opt.cal, runids)
create_ascii_table(obs_table, opt.outfile + ".txt")
# create_sky_plot(obs_table, opt.outfile + ".pdf", stack=opt.stack)
| gpl-3.0 | -8,040,165,882,080,599,000 | 34.08365 | 115 | 0.563672 | false |
enthought/uchicago-pyanno | pyanno/ui/annotations_view.py | 1 | 9266 | # Copyright (c) 2011, Enthought, Ltd.
# Author: Pietro Berkes <[email protected]>
# License: Modified BSD license (2-clause)
from traits.has_traits import HasTraits, on_trait_change
from traits.trait_numeric import Array
from traits.trait_types import (Instance, Int, ListFloat, Button, Event, File,
Any)
from traits.traits import Property
from traitsui.api import View, VGroup
from traitsui.editors.file_editor import FileEditor
from traitsui.editors.range_editor import RangeEditor
from traitsui.editors.tabular_editor import TabularEditor
from traitsui.group import HGroup, VGrid, Group
from traitsui.handler import ModelView
from traitsui.item import Item, Spring, Label
from traitsui.menu import OKCancelButtons
from pyanno.annotations import AnnotationsContainer
from pyanno.ui.appbase.wx_utils import is_display_small
from pyanno.ui.arrayview import Array2DAdapter
from pyanno.plots.hinton_plot import HintonDiagramPlot
from pyanno.util import labels_frequency, MISSING_VALUE, PyannoValueError
import numpy as np
import logging
logger = logging.getLogger(__name__)
WIDTH_CELL = 60
MAX_WIDTH = 1000
W_MARGIN = 150
class DataView(HasTraits):
data = Array(dtype=object)
def traits_view(self):
ncolumns = len(self.data[0])
w_table = min(WIDTH_CELL * ncolumns, MAX_WIDTH)
w_view = min(w_table + W_MARGIN, MAX_WIDTH)
return View(
Group(
Item('data',
editor=TabularEditor
(
adapter=Array2DAdapter(ncolumns=ncolumns,
format='%s',
show_index=True)),
show_label=False,
width=w_table,
padding=10),
),
title='Annotations',
width=w_view,
height=800,
resizable=True,
buttons=OKCancelButtons
)
class AnnotationsView(ModelView):
""" Traits UI Model/View for annotations."""
# reference to main application
application = Any
### Model-related traits ###
# container for annotations and their metadata
annotations_container = Instance(AnnotationsContainer)
# this can be set by the current model (could be different from the
# number of classes in the annotations themselves)
nclasses = Int(1)
frequency = ListFloat
@on_trait_change('annotations_container,annotations_updated,nclasses')
def _update_frequency(self):
nclasses = max(self.nclasses, self.annotations_container.nclasses)
try:
frequency = labels_frequency(
self.annotations_container.annotations,
nclasses).tolist()
except PyannoValueError as e:
logger.debug(e)
frequency = np.zeros((nclasses,)).tolist()
self.frequency = frequency
self.frequency_plot = HintonDiagramPlot(
data=self.frequency,
title='Observed label frequencies')
### Traits UI definitions ###
# event raised when annotations are updated
annotations_updated = Event
## frequency plot definition
frequency_plot = Instance(HintonDiagramPlot)
## edit data button opens annotations editor
edit_data = Button(label='Edit annotations...')
# save current annotations
save_data = Button(label='Save annotations...')
def _edit_data_fired(self):
data_view = DataView(data=self.annotations_container.raw_annotations)
data_view.edit_traits(kind='livemodal', parent=self.info.ui.control)
self.annotations_container = AnnotationsContainer.from_array(
data_view.data,
name = self.annotations_container.name
)
if self.application is not None:
self.application.main_window.set_annotations(
self.annotations_container)
def _save_data_fired(self):
save_filename = SaveAnnotationsDialog.open()
if save_filename is not None:
self.annotations_container.save_to(save_filename, set_name=True)
if self.application is not None:
self.application.main_window.set_annotations(
self.annotations_container)
### View definition ###
_name = Property
def _get__name(self):
return self.annotations_container.name
_nitems = Property
def _get__nitems(self):
return self.annotations_container.nitems
_nclasses = Property
def _get__nclasses(self):
return self.annotations_container.nclasses
_labels = Property
def _get__labels(self):
return str(self.annotations_container.labels)
_nannotators = Property
def _get__nannotators(self):
return str(self.annotations_container.nannotators)
def traits_view(self):
if is_display_small():
w_view = 350
else:
w_view = 450
info_group = VGroup(
Item('_name',
label='Annotations name:',
style='readonly',
padding=0),
VGrid(
Item('_nclasses',
label='Number of classes:',
style='readonly',
width=10),
Item('_labels',
label='Labels:',
style='readonly'),
Item('_nannotators',
label='Number of annotators:',
style='readonly', width=10),
Item('_nitems',
label='Number of items:',
style='readonly'),
padding=0
),
padding=0
)
body = VGroup(
info_group,
Item('_'),
HGroup(
VGroup(
Spring(),
Item('frequency_plot',
style='custom',
resizable=False,
show_label=False,
width=w_view
),
Spring()
),
Spring(),
VGroup(
Spring(),
Item('edit_data',
enabled_when='annotations_are_defined',
show_label=False),
Item('save_data',
enabled_when='annotations_are_defined',
show_label=False),
Spring()
)
),
Spring(),
Item('_'),
)
traits_view = View(body)
return traits_view
class SaveAnnotationsDialog(HasTraits):
filename = File
def _filename_default(self):
import os
home = os.getenv('HOME') or os.getenv('HOMEPATH')
return os.path.join(home, 'annotations.txt')
@staticmethod
def open():
dialog = SaveAnnotationsDialog()
dialog_ui = dialog.edit_traits(kind='modal')
if dialog_ui.result:
            # user pressed 'OK'
return dialog.filename
else:
return None
traits_view = View(
Item('filename', label='Save to:',
editor=FileEditor(allow_dir=False,
dialog_style='save',
entries=0),
style='simple'
),
width = 400,
resizable = True,
buttons = ['OK', 'Cancel']
)
class CreateNewAnnotationsDialog(HasTraits):
nannotators = Int(8)
nitems = Int(100)
@staticmethod
def create_annotations_dialog():
dialog = CreateNewAnnotationsDialog()
dialog_ui = dialog.edit_traits(kind='modal')
if dialog_ui.result:
# user pressed 'Ok'
annotations = np.empty((dialog.nitems, dialog.nannotators),
dtype=int)
annotations.fill(MISSING_VALUE)
return annotations
else:
return None
def traits_view(self):
view = View(
VGroup(
Item(
'nannotators',
editor=RangeEditor(mode='spinner', low=3, high=1000),
label='Number of annotators:'
),
Item(
'nitems',
editor=RangeEditor(mode='spinner', low=2, high=1000000),
label='Number of items'
),
),
buttons = ['OK', 'Cancel']
)
return view
#### Testing and debugging ####################################################
def main():
""" Entry point for standalone testing/debugging. """
from pyanno.modelBt_loopdesign import ModelBtLoopDesign
model = ModelBtLoopDesign.create_initial_state(5)
annotations = model.generate_annotations(2)
anno = AnnotationsContainer.from_array(annotations, name='blah')
model_view = AnnotationsView(annotations_container=anno, model=HasTraits())
model_view.configure_traits()
return model, annotations, model_view
if __name__ == '__main__':
m, a, mv = main()
| bsd-2-clause | -7,870,631,480,164,798,000 | 29.281046 | 79 | 0.54878 | false |
try-dash-now/idash | lib/winTelnet.py | 1 | 17448 | __author__ = 'Sean Yu'
'''created @2015/9/14'''
'''a windows telnet session'''
from telnetlib import Telnet as spawn
import socket
import select
# Tunable parameters
DEBUGLEVEL = 0
# Telnet protocol defaults
TELNET_PORT = 23
# Telnet protocol characters (don't change)
IAC = chr(255) # "Interpret As Command"
DONT = chr(254)
DO = chr(253)
WONT = chr(252)
WILL = chr(251)
theNULL = chr(0)
SE = chr(240) # Subnegotiation End
NOP = chr(241) # No Operation
DM = chr(242) # Data Mark
BRK = chr(243) # Break
IP = chr(244) # Interrupt process
AO = chr(245) # Abort output
AYT = chr(246) # Are You There
EC = chr(247) # Erase Character
EL = chr(248) # Erase Line
GA = chr(249) # Go Ahead
SB = chr(250) # Subnegotiation Begin
# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = chr(0) # 8-bit data path
ECHO = chr(1) # echo
RCP = chr(2) # prepare to reconnect
SGA = chr(3) # suppress go ahead
NAMS = chr(4) # approximate message size
STATUS = chr(5) # give status
TM = chr(6) # timing mark
RCTE = chr(7) # remote controlled transmission and echo
NAOL = chr(8) # negotiate about output line width
NAOP = chr(9) # negotiate about output page size
NAOCRD = chr(10) # negotiate about CR disposition
NAOHTS = chr(11) # negotiate about horizontal tabstops
NAOHTD = chr(12) # negotiate about horizontal tab disposition
NAOFFD = chr(13) # negotiate about formfeed disposition
NAOVTS = chr(14) # negotiate about vertical tab stops
NAOVTD = chr(15) # negotiate about vertical tab disposition
NAOLFD = chr(16) # negotiate about output LF disposition
XASCII = chr(17) # extended ascii character set
LOGOUT = chr(18) # force logout
BM = chr(19) # byte macro
DET = chr(20) # data entry terminal
SUPDUP = chr(21) # supdup protocol
SUPDUPOUTPUT = chr(22) # supdup output
SNDLOC = chr(23) # send location
TTYPE = chr(24) # terminal type
EOR = chr(25) # end or record
TUID = chr(26) # TACACS user identification
OUTMRK = chr(27) # output marking
TTYLOC = chr(28) # terminal location number
VT3270REGIME = chr(29) # 3270 regime
X3PAD = chr(30) # X.3 PAD
NAWS = chr(31) # window size
TSPEED = chr(32) # terminal speed
LFLOW = chr(33) # remote flow control
LINEMODE = chr(34) # Linemode option
XDISPLOC = chr(35) # X Display Location
OLD_ENVIRON = chr(36) # Old - Environment variables
AUTHENTICATION = chr(37) # Authenticate
ENCRYPT = chr(38) # Encryption option
NEW_ENVIRON = chr(39) # New - Environment variables
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = chr(40) # TN3270E
XAUTH = chr(41) # XAUTH
CHARSET = chr(42) # CHARSET
RSP = chr(43) # Telnet Remote Serial Port
COM_PORT_OPTION = chr(44) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
TLS = chr(46) # Telnet Start TLS
KERMIT = chr(47) # KERMIT
SEND_URL = chr(48) # SEND-URL
FORWARD_X = chr(49) # FORWARD_X
PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
EXOPL = chr(255) # Extended-Options-List
NOOPT = chr(0)
from dut import dut
import threading
import os
import time
import re
class winTelnet(dut, object):#, spawn
def __del__(self):
self.SessionAlive= False
time.sleep(0.1)
if self.sock:
self.write('exit')
self.write('exit')
self.write('exit')
self.send(']',Ctrl=True)
self.write('quit')
#self.sock.close()
def __init__(self, name, attr =None,logger=None, logpath= None, shareData=None):
dut.__init__(self, name,attr,logger, logpath , shareData)
try:
host=""
port=23
reHostOnly= re.compile('\s*telnet\s+([\d.\w\-_]+)\s*',re.I)
reHostPort = re.compile('\s*telnet\s+([\d.\w]+)\s+(\d+)', re.I )
command = self.attribute.get('CMD')
m1=re.match(reHostOnly, command)
m2=re.match(reHostPort, command)
if m2:
host= m2.group(1)
port= int(m2.group(2))
elif m1:
host= m1.group(1)
#import socket
#timeout = 30
#self.sock = socket.create_connection((host, port), timeout)
self.debuglevel = DEBUGLEVEL
self.host = host
self.port = port
timeout=0.5
self.timeout = timeout
self.sock = None
self.rawq = ''
self.irawq = 0
self.cookedq = ''
self.eof = 0
self.iacseq = '' # Buffer for IAC sequence.
self.sb = 0 # flag for SB and SE sequence.
self.sbdataq = ''
self.option_callback = None
self._has_poll = hasattr(select, 'poll')
if host is not None and self.is_simulation() == False:
self.open(str(host), port, timeout)
th =threading.Thread(target=self.ReadOutput)
th.start()
time.sleep(1)
if self.attribute.has_key('LOGIN'):
self.login()
self.debuglevel=0
except Exception as e:
self.closeSession()
import traceback
print(traceback.format_exc())
raise e
def rawq_getchar(self):
"""Get next char from raw queue.
Block if no data is immediately available. Raise EOFError
when connection is closed.
"""
if not self.rawq:
self.fill_rawq()
if self.eof:
raise EOFError
c = self.rawq[self.irawq]
self.irawq = self.irawq + 1
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
return c
def write(self, buffer):
"""Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
"""
buffer =buffer.encode(encoding='utf-8')
if IAC in buffer:
buffer = buffer.replace(IAC, IAC+IAC)
self.msg("send %r", buffer)
if self.sock:
self.sock.sendall(buffer)
try:
super(winTelnet, self).write()
except:
pass
def msg(self, msg, *args):
"""Print a debug message, when the debug level is > 0.
If extra arguments are present, they are substituted in the
message using the standard string formatting operator.
"""
if self.debuglevel > 0:
print ('Telnet(%s,%s):' % (self.host, self.port),)
if args:
print (msg % args)
else:
print (msg)
def fill_rawq(self):
"""Fill raw queue from exactly one recv() system call.
Block if no data is immediately available. Set self.eof when
connection is closed.
"""
if self.sock==0 or self.sock==None:
return
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
# The buffer size should be fairly small so as to avoid quadratic
# behavior in process_rawq() above
buf = self.sock.recv(50)
self.msg("recv %r", buf)
self.eof = (not buf)
self.rawq = self.rawq + buf
def process_rawq(self):
"""Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence.
"""
buf = ['', '']
try:
while self.rawq:
c = self.rawq_getchar()
if not self.iacseq:
if c == theNULL:
continue
if c == "\021":
continue
if c != IAC:
buf[self.sb] = buf[self.sb] + c
continue
else:
self.iacseq += c
elif len(self.iacseq) == 1:
# 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
if c in (DO, DONT, WILL, WONT):
self.iacseq += c
continue
self.iacseq = ''
if c == IAC:
buf[self.sb] = buf[self.sb] + c
else:
if c == SB: # SB ... SE start.
self.sb = 1
self.sbdataq = ''
elif c == SE:
self.sb = 0
self.sbdataq = self.sbdataq + buf[1]
buf[1] = ''
if self.option_callback:
# Callback is supposed to look into
# the sbdataq
self.option_callback(self.sock, c, NOOPT)
else:
# We can't offer automatic processing of
# suboptions. Alas, we should not get any
# unless we did a WILL/DO before.
self.msg('IAC %d not recognized' % ord(c))
elif len(self.iacseq) == 2:
cmd = self.iacseq[1]
self.iacseq = ''
opt = c
if cmd in (DO, DONT):
self.msg('IAC %s %d',
cmd == DO and 'DO' or 'DONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + WONT + opt)
elif cmd in (WILL, WONT):
self.msg('IAC %s %d',
cmd == WILL and 'WILL' or 'WONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + DONT + opt)
except EOFError: # raised by self.rawq_getchar()
self.iacseq = '' # Reset on EOF
self.sb = 0
pass
self.cookedq = self.cookedq + self.removeSpecChar(buf[0])
self.sbdataq = self.sbdataq + buf[1]
def removeSpecChar(self, inputString):
#^@ \x00 \000 0
#^A \x01 \001 1
#^B \x02 \002 2
#^C \x03 \003 3
#^D \x04 \004 4
#^E \x05 \005 5
#^F \x06 \006 6
#^G \x07 \007 7
#^H \x08 \010 8
#^I \x09 \011 9
#^J \x0a \012 10
#^K \x0b \013 11
#^L \x0c \014 12
#^M \x0d \015 13
#^N \x0e \016 14
#^O \x0f \017 15
#^P \x10 \020 16
#^Q \x11 \021 17
#^R \x12 \022 18
#^S \x13 \023 19
#^T \x14 \024 20
#^U \x15 \025 21
#^V \x16 \026 22
#^W \x17 \027 23
#^X \x18 \030 24
#^Y \x19 \031 25
#^Z \x1a \032 26
#^[ \x1b \033 27
#^\ \x1c \034 28
#^] \x1d \035 29
#^^ \x1e \036 30
inputString = inputString.replace(chr(0x08), '')
inputString = inputString.replace(chr(0x03), '^C')
inputString = inputString.replace(chr(0x04), '^D')
inputString = inputString.replace(chr(0x18), '^X')
return inputString
def open(self, host, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Connect to a host.
The optional second argument is the port number, which
defaults to the standard telnet port (23).
Don't try to reopen an already connected instance.
"""
self.eof = 0
if not port:
port = TELNET_PORT
self.host = host
self.port = port
self.timeout = timeout
if self.is_simulation():
return
else:
self.sock = socket.create_connection((host, port), timeout)
def ReadOutput(self):
maxInterval = 60
if self.timestampCmd ==None:
self.timestampCmd= time.time()
fail_counter = 0
while self.SessionAlive:
try:
#if not self.sock:
# self.relogin()
if self.is_simulation():
if self.get_search_buffer()=='':
self.cookedq = self.fake_in.pop()
else:
if self.sock:
#self.info('time in ReadOutput',time.time(), 'timestampCmd', self.timestampCmd, 'max interval', maxInterval, 'delta', time.time()-self.timestampCmd)
if (time.time()-self.timestampCmd)>maxInterval:
self.write('\r\n')
self.timestampCmd = time.time()
#self.info('anti-idle', fail_counter )
else:
raise Exception('[Errno 10053] An established connection was aborted by the software in your host machine')
self.fill_rawq()
self.cookedq=''
self.process_rawq()
self.checkLine(self.cookedq)
self.lockStreamOut.acquire()
self.streamOut+=self.cookedq
self.lockStreamOut.release()
if self.logfile and self.cookedq.__len__()!=0:
self.logfile.write(self.cookedq)
self.logfile.flush()
#if fail_counter:
# self.info(fail_counter, 'time out error cleared')
fail_counter = 0
except KeyboardInterrupt:
break
except Exception as e:
if self.loginDone:
fail_counter+=1
if self.debuglevel and fail_counter%10==0:
print('\n%s Exception %d:'%(self.name, fail_counter)+e.__str__()+'\n')
if str(e).find('timed out')==-1:
self.error('fail_counter', fail_counter, 'max_output_time_out',self.max_output_time_out, e)
try:
if self.sock:
self.sock = 0
self.eof = 1
self.iacseq = ''
self.sb = 0
self.open(self.host,self.port,self.timeout)
if self.autoReloginFlag:
fail_counter = 0
th =threading.Thread(target=self.relogin)
th.start()
except Exception as e:
self.error('\n%s Exception: %d:'%(self.name, fail_counter)+e.__str__()+'\n')
if str(e) =='[Errno 10053] An established connection was aborted by the software in your host machine' or '[Errno 9] Bad file descriptor'==str(e) or str(e) =='[Errno 10054] An existing connection was forcibly closed by the remote host':
break
time.sleep(0.2)
self.closeSession()
def closeSession(self):
print('\nquit %s'%self.name)
self.SessionAlive = False
try:
for i in xrange(1,3,1):
self.send('exit')
self.sleep(0.5)
self.send(']',Ctrl=True)
self.send('quit')
self.logfile.flush()
except:
pass
def show(self):
        '''Return the delta of streamOut since the last call of this function,
        and move idxUpdate to the end of streamOut'''
newIndex = self.streamOut.__len__()
result = self.streamOut[self.idxUpdate : newIndex+1]
self.idxUpdate= newIndex
#print('print::%d'%result.__len__())
if result!='':
result= self.colorString(result)
print('\t%s'%(result.replace('\n', '\n\t')))
return result
def relogin(self, retry=1):
#time.sleep(3)
tmp_retry = 0
while tmp_retry< retry:
tmp_retry+=1
self.lockRelogin.acquire()
try:
if self.counterRelogin>0:
self.lockRelogin.release()
return
self.counterRelogin+=1
self.loginDone=False
if self.sock:
self.write('quit\n\r\n')
for i in range(0,3):
self.write('exit')
self.send(']',Ctrl=True)
self.send('quit')
self.send(']',Ctrl=True)
self.send('e')
self.sock.close()
self.sock = 0
self.eof = 1
self.iacseq = ''
self.sb = 0
self.info('retry login: %d/%d'%(tmp_retry,retry))
self.open(self.host,self.port,self.timeout)
import time
time.sleep(1)
self.login()
self.counterRelogin-=1
self.loginDone=True
break
except Exception as e:
self.counterRelogin-=1
self.lockRelogin.release()
if tmp_retry>retry:
raise e
else:
self.sleep(5)
self.lockRelogin.release()
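# Construction sketch (illustrative only, not part of the original module).  The
# attribute keys are inferred from this class: 'CMD' must match the telnet host/port
# regexes above and 'LOGIN' (hypothetical value here) triggers self.login(); the dut
# base class is assumed to expose the dict as self.attribute.
def _example_open_session():
    attr = {'CMD': 'telnet 192.0.2.10 23',
            'LOGIN': 'admin'}
    return winTelnet('dut1', attr=attr)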
| mit | 7,032,868,279,089,488,000 | 33.414201 | 256 | 0.504814 | false |
ValvePython/steam | steam/core/crypto.py | 1 | 3356 | """
All functions in this module take and return :class:`bytes`
"""
import sys
from os import urandom as random_bytes
from struct import pack
from base64 import b64decode
from Cryptodome.Hash import MD5, SHA1, HMAC
from Cryptodome.PublicKey.RSA import import_key as rsa_import_key, construct as rsa_construct
from Cryptodome.Cipher import PKCS1_OAEP, PKCS1_v1_5
from Cryptodome.Cipher import AES as AES
class UniverseKey(object):
"""Public keys for Universes"""
Public = rsa_import_key(b64decode("""
MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDf7BrWLBBmLBc1OhSwfFkRf53T
2Ct64+AVzRkeRuh7h3SiGEYxqQMUeYKO6UWiSRKpI2hzic9pobFhRr3Bvr/WARvY
gdTckPv+T1JzZsuVcNfFjrocejN1oWI0Rrtgt4Bo+hOneoo3S57G9F1fOpn5nsQ6
6WOiu4gZKODnFMBCiQIBEQ==
"""))
BS = 16
pad = lambda s: s + (BS - len(s) % BS) * pack('B', BS - len(s) % BS)
if sys.version_info < (3,):
unpad = lambda s: s[0:-ord(s[-1])]
else:
unpad = lambda s: s[0:-s[-1]]
def generate_session_key(hmac_secret=b''):
"""
:param hmac_secret: optional HMAC
:type hmac_secret: :class:`bytes`
:return: (session_key, encrypted_session_key) tuple
:rtype: :class:`tuple`
"""
session_key = random_bytes(32)
encrypted_session_key = PKCS1_OAEP.new(UniverseKey.Public, SHA1)\
.encrypt(session_key + hmac_secret)
return (session_key, encrypted_session_key)
def symmetric_encrypt(message, key):
iv = random_bytes(BS)
return symmetric_encrypt_with_iv(message, key, iv)
def symmetric_encrypt_ecb(message, key):
return AES.new(key, AES.MODE_ECB).encrypt(pad(message))
def symmetric_encrypt_HMAC(message, key, hmac_secret):
prefix = random_bytes(3)
hmac = hmac_sha1(hmac_secret, prefix + message)
iv = hmac[:13] + prefix
return symmetric_encrypt_with_iv(message, key, iv)
def symmetric_encrypt_iv(iv, key):
return AES.new(key, AES.MODE_ECB).encrypt(iv)
def symmetric_encrypt_with_iv(message, key, iv):
encrypted_iv = symmetric_encrypt_iv(iv, key)
cyphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(message))
return encrypted_iv + cyphertext
def symmetric_decrypt(cyphertext, key):
iv = symmetric_decrypt_iv(cyphertext, key)
return symmetric_decrypt_with_iv(cyphertext, key, iv)
def symmetric_decrypt_ecb(cyphertext, key):
return unpad(AES.new(key, AES.MODE_ECB).decrypt(cyphertext))
def symmetric_decrypt_HMAC(cyphertext, key, hmac_secret):
""":raises: :class:`RuntimeError` when HMAC verification fails"""
iv = symmetric_decrypt_iv(cyphertext, key)
message = symmetric_decrypt_with_iv(cyphertext, key, iv)
hmac = hmac_sha1(hmac_secret, iv[-3:] + message)
if iv[:13] != hmac[:13]:
raise RuntimeError("Unable to decrypt message. HMAC does not match.")
return message
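# Usage sketch (illustrative only, not part of the original module): a session-key
# round trip through the HMAC-authenticated helpers defined above.
def _example_hmac_round_trip():
    hmac_secret = random_bytes(16)
    session_key, _encrypted_key = generate_session_key(hmac_secret)
    cyphertext = symmetric_encrypt_HMAC(b'hello steam', session_key, hmac_secret)
    # raises RuntimeError if the ciphertext or HMAC prefix was tampered with
    return symmetric_decrypt_HMAC(cyphertext, session_key, hmac_secret)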
def symmetric_decrypt_iv(cyphertext, key):
return AES.new(key, AES.MODE_ECB).decrypt(cyphertext[:BS])
def symmetric_decrypt_with_iv(cyphertext, key, iv):
return unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(cyphertext[BS:]))
def hmac_sha1(secret, data):
return HMAC.new(secret, data, SHA1).digest()
def sha1_hash(data):
return SHA1.new(data).digest()
def md5_hash(data):
return MD5.new(data).digest()
def rsa_publickey(mod, exp):
return rsa_construct((mod, exp))
def pkcs1v15_encrypt(key, message):
return PKCS1_v1_5.new(key).encrypt(message)
| mit | -1,118,242,270,544,146,800 | 30.660377 | 93 | 0.703516 | false |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/tests/python/unittest/test_lang_basic.py | 1 | 4858 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
def test_const():
x = tvm.const(1, "int32")
print(x.dtype)
assert x.dtype == tvm.int32
assert isinstance(x, tvm.expr.IntImm)
def test_make():
x = tvm.const(1, "int32")
y = tvm.var("x")
z = x + y
assert isinstance(tvm.max(x, y), tvm.expr.Max)
assert isinstance(tvm.min(x, y), tvm.expr.Min)
def test_ir():
x = tvm.const(1, "int32")
y = tvm.make.IntImm('int32', 1)
z = x + y
stmt = tvm.make.Evaluate(z)
assert isinstance(stmt, tvm.stmt.Evaluate)
def test_ir2():
x = tvm.var("n")
a = tvm.var("array", tvm.handle)
st = tvm.make.Store(a, x + 1, 1)
assert isinstance(st, tvm.stmt.Store)
assert(st.buffer_var == a)
def test_let():
x = tvm.var('x')
y = tvm.var('y')
stmt = tvm.make.LetStmt(
x, 10, tvm.make.Evaluate(x + 1));
def test_cast():
x = tvm.var('x', dtype="float32")
y = x.astype("int32")
z = x.astype("float32x4")
assert isinstance(y, tvm.expr.Cast)
assert isinstance(z, tvm.expr.Broadcast)
assert z.lanes == 4
def test_attr():
x = tvm.var('x')
y = tvm.var('y')
stmt = tvm.make.AttrStmt(
y, "stride", 10, tvm.make.Evaluate(x + 1));
assert stmt.node == y
a = tvm.convert(1)
assert a.value == 1
try:
a.no_field
assert False
except AttributeError:
pass
def test_basic():
a = tvm.var('a')
b = tvm.var('b')
c = a + b
assert str(c) == '(%s + %s)' % (a.name, b.name)
def test_stmt():
x = tvm.make.Evaluate(0)
tvm.make.For(tvm.var('i'), 0, 1,
tvm.stmt.For.Serial, 0,
x)
def test_dir():
x = tvm.var('x')
dir(x)
def test_dtype():
x = tvm.var('x')
assert x.dtype == 'int32'
y = tvm.var('y')
assert (x > y).dtype == 'bool'
def test_any():
x = tvm.var('x')
y = tvm.var('y')
z = tvm.var('z')
try:
t = x or x
assert False
except ValueError:
pass
try:
tvm.any()
assert False
except ValueError:
pass
assert str(tvm.any(x < y)) == '(%s < %s)' % (x.name, y.name)
assert str(tvm.any(x < y, x > z)) == '((%s < %s) || (%s > %s))' % (
x.name, y.name, x.name, z.name)
assert str(tvm.any(x < y, y > z + 1, x < z * 2)) == \
'(((%s < %s) || (%s > (%s + 1))) || (%s < (%s*2)))' % (
x.name, y.name, y.name, z.name, x.name, z.name)
def test_all():
x = tvm.var('x')
y = tvm.var('y')
z = tvm.var('z')
try:
t = x and x
assert False
except ValueError:
pass
try:
tvm.all()
assert False
except ValueError:
pass
assert str(tvm.all(x < y)) == '(%s < %s)' % (x.name, y.name)
assert str(tvm.all(x < y, x > z)) == '((%s < %s) && (%s > %s))' % (
x.name, y.name, x.name, z.name)
assert str(tvm.all(x < y, y > z + 1, x < z * 2)) == \
'(((%s < %s) && (%s > (%s + 1))) && (%s < (%s*2)))' % (
x.name, y.name, y.name, z.name, x.name, z.name)
def test_bitwise():
x = tvm.var('x')
y = tvm.var('y')
assert str(x << y) == 'shift_left(x, y)'
assert str(x >> y) == 'shift_right(x, y)'
assert str(x & y) == 'bitwise_and(x, y)'
assert str(x | y) == 'bitwise_or(x, y)'
assert str(x ^ y) == 'bitwise_xor(x, y)'
assert str(~x) == 'bitwise_not(x)'
assert(tvm.const(1, "int8x2") >> 1).dtype == "int8x2"
assert(x >> tvm.const(1, "int32x2")).dtype == "int32x2"
assert(tvm.var("z", "int8x2") << tvm.const(1, "int8x2")).dtype == "int8x2"
def test_equality():
a = tvm.var('a')
b = tvm.var('b')
c = (a == b)
assert not c
d = (c != c)
assert not d
def test_equality_string_imm():
x = 'a'
y = tvm.make.StringImm(x)
x == y.value
x == y
if __name__ == "__main__":
test_cast()
test_attr()
test_const()
test_make()
test_ir()
test_basic()
test_stmt()
test_let()
test_dir()
test_dtype()
test_any()
test_all()
test_bitwise()
test_equality()
test_equality_string_imm()
| apache-2.0 | -8,957,242,304,445,555,000 | 24.703704 | 78 | 0.535611 | false |
ddu7/PyLC | 063Unique Paths II.py | 1 | 1314 | # -*- coding: utf-8 -*-
# Follow up for "Unique Paths":
#
# Now consider if some obstacles are added to the grids. How many unique paths would there be?
#
# An obstacle and empty space are marked as 1 and 0 respectively in the grid.
#
# For example,
# There is one obstacle in the middle of a 3x3 grid as illustrated below.
#
# [           DP table (paths to reach each cell):
# [0,0,0],    1 1 1
# [0,1,0],    1 * 1
# [0,0,0]     1 1 2
# ]
# The total number of unique paths is 2.
#
# Note: m and n will be at most 100.
# Approach: rework the input grid in place (0 becomes 1, 1 becomes 0), then update it with the same DP as Unique Paths and return the last value.
class Solution():
def uniquePaths(self, grid):
m = len(grid)
n = len(grid[0])
for i in range(0, m):
for j in range(0, n):
if grid[i][j] == 0:
grid[i][j] = 1
else:
grid[i][j] = 0
        # Cells in the first row/column are reachable only while every cell
        # before them is free, so propagate blockages before running the DP.
        for j in range(1, n):
            grid[0][j] *= grid[0][j - 1]
        for i in range(1, m):
            grid[i][0] *= grid[i - 1][0]
        for i in range(1, m):
for j in range(1, n):
if grid[i][j] != 0:
grid[i][j] = grid[i - 1][j] + grid[i][j - 1]
else:
grid[i][j] = 0
return grid[m - 1][n - 1]
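# The 4x4 grid below (obstacle at row 2, column 0) has 16 unique paths.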
print Solution().uniquePaths([
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]
]
) | mit | -4,832,800,005,191,433,000 | 26.711111 | 94 | 0.463082 | false |
mbourqui/django-echoices | echoices/enums/enums.py | 1 | 9631 | import warnings
from enum import Enum, EnumMeta
from types import DynamicClassAttribute
class EChoiceMeta(EnumMeta):
"""
Used to override some methods.
See Also
--------
https://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/#restrictions-with-multiple-metaclasses
"""
def __getitem__(cls, value):
try:
# Should always be there (at least in Python 3.5)
return cls._value2member_map_[value]
except AttributeError:
value2member_map_ = {}
for echoice in list(cls):
value2member_map_[echoice.value] = echoice
cls._value2member_map_ = value2member_map_
return cls._value2member_map_[value]
class EChoice(Enum, metaclass=EChoiceMeta):
"""
Custom Enum to ease the usage of choices outside the model.
Works by overriding the default _value_ field. This is done to offer a harmonized interface
when using auto-generated numeric values.
By the way, `value` is now the actual value to be stored in the DB.
Notes
-----
Interface differs slightly from the Enum:
`EChoice.value` returns the actual value to be stored in the DB, while the legacy `Enum.value`
would return the whole tuple used when defining the enumeration item.
Raises
------
AttributeError
in case of duplicated values
See Also
--------
http://stackoverflow.com/a/24105344
"""
def __new__(cls, value, label, *args, **kwargs):
if len(cls) == 0:
cls.__value_type_ = type(value)
# SEE: https://stackoverflow.com/a/35953630/
# SEE: https://docs.djangoproject.com/en/stable/ref/templates/api/#variables-and-lookups
cls.do_not_call_in_templates = True
else:
if type(value) is not cls.__value_type_:
raise TypeError("Incompatible type: {}. All values must be {}.".format(type(value), cls.__value_type_))
if value in [c.value for c in list(cls)]:
raise AttributeError(
"Duplicate value: '{}'. Only unique values are supported in {}.".format(value, EChoice))
obj = object.__new__(cls)
obj._value_ = value # Overrides default _value_
obj._label_ = label
return obj
@DynamicClassAttribute
def label(self):
"""The label of the Enum member."""
return self._label_
@property
def choice(self):
return self.value, self.label
def __call__(self, attr='value'):
"""
        Hack to get the "selected" tag. It does nothing more than return the attribute `attr`; if `attr` is
        a callable, it is called.
Gets called in `django.forms.boundfield#BoundField.initial`.
Parameters
----------
attr : str
            Arguably redundant, but since __call__ is implemented anyway, it doubles as a selector for which
            field of the member to return.
Returns
-------
`attr`, or `attr()` if `attr` is a callable
"""
attr = self.__getattribute__(attr)
if callable(attr):
return attr()
return attr
def __len__(self):
"""
If `len(value)` is supported, returns that length. Otherwise, returns 1.
This is mainly a hack to pass the validations. Since the validation ensures that the value will fit in the DB
field, it applies (solely?) on textual values. So it does no harm to return a non-null constant for a numeric
`value`.
Returns
-------
int : `len(value)` if supported, else 1.
"""
# FIXME: find a way to set it *only* to EChoice with values supporting len()
try:
return len(self.value)
except TypeError:
return 1
@classmethod
def values(cls):
"""
Returns
-------
tuple
of all the values of this Enum
"""
if not hasattr(cls, '__values_'):
cls.__values_ = tuple([c.value for c in list(cls)])
return cls.__values_
@classmethod
def max_value_length(cls):
"""
Not to be used when using numeric values.
Returns
-------
int
the maximal length required by this Enum to be stored in the database
"""
if not hasattr(cls, '__max_value_length_'):
cls.__max_value_length_ = max([len(c.value) for c in list(cls)])
return cls.__max_value_length_
@classmethod
def choices(cls):
"""
Generate the choices as required by Django models.
Returns
-------
tuple
"""
# "natural" order, aka as given when instantiating
if not hasattr(cls, '__choices_'):
cls.__choices_ = tuple([c.choice for c in list(cls)])
return cls.__choices_
@classmethod
def from_value(cls, value):
"""
Return the EChoice object associated with this value, if any.
Parameters
----------
value
In the type of the `value` field, as set when instantiating this EChoice.
Returns
-------
EChoice
Raises
------
KeyError
if `value` does not exist in any element
"""
warnings.warn("{0}.{1} will be deprecated in a future release. "
"Please use {0}.{2} instead".format(cls.__name__, cls.from_value.__name__, cls.get.__name__),
PendingDeprecationWarning)
return cls[value]
@classmethod
def get(cls, value, default=None):
"""
Return the EChoice object associated with this value, else `default`. If default is not given, it defaults to
None, so that this method never raises a KeyError.
Parameters
----------
value
In the type of the `value` field, as set when instantiating this EChoice.
default
Returned if the value is not found.
Returns
-------
EChoice
"""
try:
return cls[value]
except KeyError:
return default
@classmethod
def __getvaluetype__(cls):
return cls.__value_type_
@classmethod
def coerce(cls, other):
"""
        Return `other` converted to the type of this EChoice's value. Typically, `other` is a string. The intended use
        case is to convert `other` coming from an HTML form, typically a select choice.
Parameters
----------
other : str
Returns
-------
the `other` value in the type of the value of this EChoice.
"""
return cls.__value_type_(other)
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
try:
return self.value < other
except TypeError:
return self.value < self.coerce(other)
def __le__(self, other):
return self < other or self == other
def __eq__(self, other):
if self.__class__ is other.__class__:
return self.value == other.value
try:
return self.value == self.coerce(other)
except (TypeError, ValueError):
return False
def __ge__(self, other):
return self == other or self > other
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
try:
return self.value > other
except TypeError:
return self.value > self.coerce(other)
def __hash__(self):
# Somewhat required since comparison operators are defined
return super().__hash__()
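# Illustrative sketch (added comment, the names below are examples only): a
# concrete EChoice is declared with (value, label) tuples and can feed a Django
# field's `choices` directly.
#
#   class EStates(EChoice):
#       CREATED = ('c', 'Created')
#       SUBMITTED = ('s', 'Submitted')
#
#   EStates.choices()        # (('c', 'Created'), ('s', 'Submitted'))
#   EStates.CREATED.value    # 'c'
#   EStates.CREATED.label    # 'Created'
#   EStates.get('s')         # EStates.SUBMITTED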
class EOrderedChoice(EChoice):
"""Provide ordering of the elements"""
@classmethod
def choices(cls, order='natural'):
"""
Generate the choices as required by Django models.
Parameters
----------
order : str
in which the elements should be returned. Possible values are:
* 'sorted', the elements will be sorted by `value`
* 'reverse', the elements will be sorted by `value` as if each comparison were
reversed
* 'natural' (default), the elements are ordered as when instantiated in the enumeration
Returns
-------
iterable of tuple
"""
INC, DEC, NAT = 'sorted', 'reverse', 'natural'
options = [INC, DEC, NAT]
assert order in options, "Sorting order not recognized: {}. Available options are: {}".format(order, options)
if order in [INC, DEC]:
reverse = order == DEC
if reverse:
attr = '__choices_reverse_'
else:
attr = '__choices_sorted_'
if not hasattr(cls, attr):
setattr(cls, attr, tuple([(c.value, c.label) for c in sorted(list(cls), reverse=reverse)]))
return getattr(cls, attr)
else:
return super(EOrderedChoice, cls).choices()
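# Illustrative sketch (added comment): for an EOrderedChoice subclass (e.g. if
# the hypothetical EStates above derived from EOrderedChoice), the `order` flag
# only changes tuple ordering, not content:
#
#   EStates.choices('sorted')    # ordered by value, ascending
#   EStates.choices('reverse')   # ordered by value, descending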
class EAutoChoice(EOrderedChoice):
"""
Auto-generated numeric `value`s. Thus support sorting by `value`.
See Also
--------
https://docs.python.org/3.5/library/enum.html#autonumber
"""
def __new__(cls, label, *args, **kwargs):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
obj._label_ = label
return obj
| gpl-3.0 | -4,122,943,399,944,168,000 | 29.003115 | 119 | 0.554979 | false |
lagopus/lagopus | test/integration_test/tools/lib/ofp/ofp_group_mod.py | 1 | 1137 | import os
import sys
import copy
import logging
from checker import *
from .ofp import register_ofp_creators
from .ofp import OfpBase
from .ofp_bucket import SCE_BUCKETS
from .ofp_bucket import OfpBucketCreator
# YAML:
# group_mod:
# command: 0
# type: 0
# group_id: 0
# buckets:
# - bucket:
# weight: 0
# watch_port: 0
# watch_group: 0
# actions
# - output:
# port: 0
SCE_GROUP_MOD = "group_mod"
@register_ofp_creators(SCE_GROUP_MOD)
class OfpGroupModCreator(OfpBase):
@classmethod
def create(cls, test_case_obj, dp, ofproto, ofp_parser, params):
# GroupMod.
kws = copy.deepcopy(params)
# buckets.
buckets = []
if SCE_BUCKETS in params:
buckets = OfpBucketCreator.create(test_case_obj,
dp, ofproto,
ofp_parser,
params[SCE_BUCKETS])
kws[SCE_BUCKETS] = buckets
# create GroupMod.
msg = ofp_parser.OFPGroupMod(dp, **kws)
return msg
| apache-2.0 | 6,999,811,155,225,049,000 | 22.204082 | 68 | 0.5365 | false |
MatrixGamesHub/mtxPython | src/mtxNet/RendererClient.py | 1 | 4645 | """
mtxPython - A framework to create matrix games.
Copyright (C) 2016 Tobias Stampfl <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation in version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import mtx
from .rendererService import RendererService
from .rendererService.ttypes import LevelInfo, Value
from thrift.Thrift import TException
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
class RendererClient(mtx.Renderer):
def __init__(self, host, port):
self._transport = TSocket.TSocket(host, port)
protocol = TBinaryProtocol.TBinaryProtocol(self._transport)
self._client = RendererService.Client(protocol)
self._connected = False
self._host = host
self._port = port
def GetHost(self):
return self._host
def GetPort(self):
return self._port
def Connect(self):
try:
self._transport.open()
self._connected = True
except TTransport.TTransportException:
self._connected = False
return self._connected
def Disconnect(self):
self._transport.close()
self._connected = False
def IsConnected(self):
return self._connected
def _CallClientCommand(self, cmd, *args, **kwargs):
if not self._connected:
return False
try:
cmd(*args, **kwargs)
except TException:
logging.error("Connection to renderer client lost...", exc_info=1)
self.Disconnect()
def ProcessActGroup(self, actGrp):
self._CallClientCommand(self._client.Freeze)
try:
for act in actGrp:
if act.id == mtx.Act.CLEAR:
self._CallClientCommand(self._client.Clear)
elif act.id in mtx.Act.LEVEL:
level = act.level
field = level._field
netField = []
for y in range(field._height):
row = []
for x in range(field._width):
cell = []
for obj in reversed(field._cells[y][x]):
cell.append([obj._id, ord(obj._symbol)])
row.append(cell)
netField.append(row)
if act.id == mtx.Act.LOAD_LEVEL:
self._CallClientCommand(self._client.LoadLevel, netField, LevelInfo(level._name, level._groundTexture, level._wallTexture))
else:
self._CallClientCommand(self._client.ResetLevel, netField)
elif act.id == mtx.Act.UPDATE:
if type(act.value) == str:
value = Value(strValue=act.value)
elif type(act.value) == bool:
value = Value(boolValue=act.value)
elif type(act.value) == int:
value = Value(intValue=act.value)
else:
value = Value(doubleValue=act.value)
self._CallClientCommand(self._client.UpdateObject, act.objId, act.key, value)
elif act.id == mtx.Act.SPAWN:
self._CallClientCommand(self._client.Spawn, act.objId, ord(act.symbol), act.x, act.y)
elif act.id == mtx.Act.REMOVE:
self._CallClientCommand(self._client.Remove, act.objId, act.sourceId)
elif act.id == mtx.Act.COLLECT:
self._CallClientCommand(self._client.Collect, act.objId, act.sourceId)
elif act.id == mtx.Act.MOVE:
self._CallClientCommand(self._client.Move, act.objId, act.direction, act.fromX, act.fromY, act.toX, act.toY)
elif act.id == mtx.Act.JUMP:
self._CallClientCommand(self._client.Jump, act.objId, act.direction, act.fromX, act.fromY, act.toX, act.toY)
finally:
self._CallClientCommand(self._client.Thaw)
| gpl-3.0 | -2,398,894,517,825,313,300 | 38.033613 | 147 | 0.575027 | false |
WatanukiRasadar/kylin | kylin/_injector.py | 1 | 1117 | from functools import wraps
from typing import Callable
from ._scope import Scope
class Injector(Callable):
"""
class decorator to inject dependencies into a callable decorated function
"""
def __init__(self, dependencies: dict, fun: Callable):
self.dependencies = dependencies
self.fun = fun
@property
def scope(self) -> Scope:
return Scope()
def __call__(self, *args, **kwargs):
injections = {}
for dependency_name, service_name in self.dependencies.items():
injections[dependency_name] = kwargs.get(dependency_name) or self.scope[service_name]
kwargs.update(injections)
return self.fun(*args, **kwargs)
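# Illustrative sketch (added comment, the service name is hypothetical): Inject
# below is the user-facing decorator; each keyword maps an argument name to a
# service name looked up in the Scope when the argument is not supplied, e.g.
#
#   @Inject(repo='user_repository')
#   def handler(request, repo):
#       ...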
class Inject(Callable):
"""
    class to receive the callable dependencies
"""
__injector__ = Injector
def __init__(self, **dependencies):
self.dependencies = dependencies
def __call__(self, fun: Callable):
def call(*args, **kwargs):
return self.__injector__(self.dependencies, fun).__call__(*args, **kwargs)
return wraps(fun).__call__(call)
| mit | -8,151,589,430,398,917,000 | 26.925 | 97 | 0.622202 | false |
tymofij/adofex | transifex/projects/templatetags/project_tags.py | 1 | 1755 | from django import template
from django.db.models import Sum
from transifex.languages.models import Language
from transifex.resources.models import RLStats, Resource
from transifex.txcommon.utils import StatBarsPositions
register = template.Library()
@register.inclusion_tag('resources/stats_bar_simple.html')
def progress_for_project(project, language_code=None, width=100):
"""Render a progressbar for the specified project."""
stats = RLStats.objects.by_project(project).filter(
language__code=language_code
).values('language__code').annotate(
trans=Sum('translated'),
untrans=Sum('untranslated')
).order_by()
total = Resource.objects.by_project(project).aggregate(
total_entities=Sum('total_entities')
)['total_entities']
if not stats:
# Project has no resources
bar_data = [
('trans', 0),
('untrans', 100)
]
return {
'untrans_percent': 100,
'trans_percent': 0,
'untrans': 0,
'trans': 0,
'pos': StatBarsPositions(bar_data, width),
'width': width
}
stats = stats[0]
translated = stats['trans']
untranslated = stats['untrans']
try:
translated_perc = translated * 100 / total
except ZeroDivisionError:
translated_perc = 100
untranslated_perc = 100 - translated_perc
bar_data = [
('trans', translated_perc),
('untrans', untranslated_perc)
]
return {
'untrans_percent': untranslated_perc,
'trans_percent': translated_perc,
'untrans': untranslated,
'trans': translated,
'pos': StatBarsPositions(bar_data, width),
'width': width
}
| gpl-3.0 | 2,672,739,128,950,525,400 | 27.306452 | 65 | 0.611966 | false |
google/google-ctf | third_party/edk2/BaseTools/Source/Python/GenFds/FdfParser.py | 1 | 193525 | ## @file
# parse FDF file
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2015, Hewlett Packard Enterprise Development, L.P.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
from re import compile, DOTALL
from string import hexdigits
from uuid import UUID
from Common.BuildToolError import *
from Common import EdkLogger
from Common.Misc import PathClass, tdict, ProcessDuplicatedInf
from Common.StringUtils import NormPath, ReplaceMacro
from Common import GlobalData
from Common.Expression import *
from Common.DataType import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.LongFilePathOs as os
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.RangeExpression import RangeExpression
from collections import OrderedDict
from .Fd import FD
from .Region import Region
from .Fv import FV
from .AprioriSection import AprioriSection
from .FfsInfStatement import FfsInfStatement
from .FfsFileStatement import FileStatement
from .VerSection import VerSection
from .UiSection import UiSection
from .FvImageSection import FvImageSection
from .DataSection import DataSection
from .DepexSection import DepexSection
from .CompressSection import CompressSection
from .GuidSection import GuidSection
from .Capsule import EFI_CERT_TYPE_PKCS7_GUID, EFI_CERT_TYPE_RSA2048_SHA256_GUID, Capsule
from .CapsuleData import CapsuleFfs, CapsulePayload, CapsuleFv, CapsuleFd, CapsuleAnyFile, CapsuleAfile
from .RuleComplexFile import RuleComplexFile
from .RuleSimpleFile import RuleSimpleFile
from .EfiSection import EfiSection
from .OptionRom import OPTIONROM
from .OptRomInfStatement import OptRomInfStatement, OverrideAttribs
from .OptRomFileStatement import OptRomFileStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
T_CHAR_CR = '\r'
T_CHAR_TAB = '\t'
T_CHAR_DOUBLE_QUOTE = '\"'
T_CHAR_SINGLE_QUOTE = '\''
T_CHAR_BRACE_R = '}'
SEPARATORS = {TAB_EQUAL_SPLIT, TAB_VALUE_SPLIT, TAB_COMMA_SPLIT, '{', T_CHAR_BRACE_R}
ALIGNMENTS = {"Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K", "64K", "128K",
"256K", "512K", "1M", "2M", "4M", "8M", "16M"}
ALIGNMENT_NOAUTO = ALIGNMENTS - {"Auto"}
CR_LB_SET = {T_CHAR_CR, TAB_LINE_BREAK}
RegionSizePattern = compile("\s*(?P<base>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<size>(?:0x|0X)?[a-fA-F0-9]+)\s*")
RegionSizeGuidPattern = compile("\s*(?P<base>\w+\.\w+[\.\w\[\]]*)\s*\|\s*(?P<size>\w+\.\w+[\.\w\[\]]*)\s*")
RegionOffsetPcdPattern = compile("\s*(?P<base>\w+\.\w+[\.\w\[\]]*)\s*$")
ShortcutPcdPattern = compile("\s*\w+\s*=\s*(?P<value>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<name>\w+\.\w+)\s*")
BaseAddrValuePattern = compile('^0[xX][0-9a-fA-F]+')
FileExtensionPattern = compile(r'([a-zA-Z][a-zA-Z0-9]*)')
TokenFindPattern = compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
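# Illustrative note (added comment, the PCD names are examples only):
# RegionSizePattern matches numeric layouts such as "0x00000000|0x00040000",
# while RegionSizeGuidPattern matches the PCD form such as
# "gTokenSpaceGuid.PcdFlashBase|gTokenSpaceGuid.PcdFlashSize".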
AllIncludeFileList = []
# Get the closest parent
def GetParentAtLine (Line):
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line):
return Profile
return None
# Check include loop
def IsValidInclude (File, Line):
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line) and Profile.FileName == File:
return False
return True
def GetRealFileLine (File, Line):
InsertedLines = 0
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line):
return Profile.GetLineInFile(Line)
elif Line >= Profile.InsertStartLineNumber and Profile.Level == 1:
InsertedLines += Profile.GetTotalLines()
return (File, Line - InsertedLines)
## The exception class used to report error messages when parsing FDF
#
# Currently the "ToolName" is set to be "FdfParser".
#
class Warning (Exception):
## The constructor
#
# @param self The object pointer
# @param Str The message to record
# @param File The FDF name
# @param Line The Line number that error occurs
#
def __init__(self, Str, File = None, Line = None):
FileLineTuple = GetRealFileLine(File, Line)
self.FileName = FileLineTuple[0]
self.LineNumber = FileLineTuple[1]
self.OriginalLineNumber = Line
self.Message = Str
self.ToolName = 'FdfParser'
def __str__(self):
return self.Message
# helper functions to facilitate consistency in warnings
# each function is for a different common warning
@staticmethod
def Expected(Str, File, Line):
return Warning("expected {}".format(Str), File, Line)
@staticmethod
def ExpectedEquals(File, Line):
return Warning.Expected("'='", File, Line)
@staticmethod
def ExpectedCurlyOpen(File, Line):
return Warning.Expected("'{'", File, Line)
@staticmethod
def ExpectedCurlyClose(File, Line):
return Warning.Expected("'}'", File, Line)
@staticmethod
def ExpectedBracketClose(File, Line):
return Warning.Expected("']'", File, Line)
## The Include file content class used to record file data when parsing an include file
#
# May raise Exception when opening file.
#
class IncludeFileProfile:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileName = FileName
self.FileLinesList = []
try:
with open(FileName, "r") as fsock:
self.FileLinesList = fsock.readlines()
for index, line in enumerate(self.FileLinesList):
if not line.endswith(TAB_LINE_BREAK):
self.FileLinesList[index] += TAB_LINE_BREAK
except:
EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)
self.InsertStartLineNumber = None
self.InsertAdjust = 0
self.IncludeFileList = []
self.Level = 1 # first level include file
def GetTotalLines(self):
TotalLines = self.InsertAdjust + len(self.FileLinesList)
for Profile in self.IncludeFileList:
TotalLines += Profile.GetTotalLines()
return TotalLines
def IsLineInFile(self, Line):
if Line >= self.InsertStartLineNumber and Line < self.InsertStartLineNumber + self.GetTotalLines():
return True
return False
def GetLineInFile(self, Line):
if not self.IsLineInFile (Line):
return (self.FileName, -1)
InsertedLines = self.InsertStartLineNumber
for Profile in self.IncludeFileList:
if Profile.IsLineInFile(Line):
return Profile.GetLineInFile(Line)
elif Line >= Profile.InsertStartLineNumber:
InsertedLines += Profile.GetTotalLines()
return (self.FileName, Line - InsertedLines + 1)
## The FDF content class used to record file data when parsing FDF
#
# May raise Exception when opening file.
#
class FileProfile:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileLinesList = []
try:
with open(FileName, "r") as fsock:
self.FileLinesList = fsock.readlines()
except:
EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)
self.FileName = FileName
self.PcdDict = OrderedDict()
self.PcdLocalDict = OrderedDict()
self.InfList = []
self.InfDict = {'ArchTBD':[]}
# ECC will use this Dict and List information
self.PcdFileLineDict = {}
self.InfFileLineList = []
self.FdDict = {}
self.FdNameNotSet = False
self.FvDict = {}
self.CapsuleDict = {}
self.RuleDict = {}
self.OptRomDict = {}
self.FmpPayloadDict = {}
## The syntax parser for FDF
#
# PreprocessFile method should be called prior to ParseFile
# CycleReferenceCheck method can detect cycles in FDF contents
#
# GetNext*** procedures mean these procedures will get next token first, then make judgement.
# Get*** procedures mean these procedures will make judgement on current token only.
#
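# Illustrative usage sketch (added comment; the driver code and variable names
# are assumptions based on the class comment above):
#
#   FdfParserObj = FdfParser(FdfFilePath)
#   FdfParserObj.ParseFile()
#   FdList = FdfParserObj.Profile.FdDict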
class FdfParser:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.Profile = FileProfile(FileName)
self.FileName = FileName
self.CurrentLineNumber = 1
self.CurrentOffsetWithinLine = 0
self.CurrentFdName = None
self.CurrentFvName = None
self._Token = ""
self._SkippedChars = ""
GlobalData.gFdfParser = self
# Used to section info
self._CurSection = []
# Key: [section name, UI name, arch]
# Value: {MACRO_NAME: MACRO_VALUE}
self._MacroDict = tdict(True, 3)
self._PcdDict = OrderedDict()
self._WipeOffArea = []
if GenFdsGlobalVariable.WorkSpaceDir == '':
GenFdsGlobalVariable.WorkSpaceDir = os.getenv("WORKSPACE")
## _SkipWhiteSpace() method
#
# Skip white spaces from current char.
#
# @param self The object pointer
#
def _SkipWhiteSpace(self):
while not self._EndOfFile():
if self._CurrentChar() in {TAB_PRINTCHAR_NUL, T_CHAR_CR, TAB_LINE_BREAK, TAB_SPACE_SPLIT, T_CHAR_TAB}:
self._SkippedChars += str(self._CurrentChar())
self._GetOneChar()
else:
return
return
## _EndOfFile() method
#
# Judge current buffer pos is at file end
#
# @param self The object pointer
# @retval True Current File buffer position is at file end
# @retval False Current File buffer position is NOT at file end
#
def _EndOfFile(self):
NumberOfLines = len(self.Profile.FileLinesList)
SizeOfLastLine = len(self.Profile.FileLinesList[-1])
if self.CurrentLineNumber == NumberOfLines and self.CurrentOffsetWithinLine >= SizeOfLastLine - 1:
return True
if self.CurrentLineNumber > NumberOfLines:
return True
return False
## _EndOfLine() method
#
# Judge current buffer pos is at line end
#
# @param self The object pointer
# @retval True Current File buffer position is at line end
# @retval False Current File buffer position is NOT at line end
#
def _EndOfLine(self):
if self.CurrentLineNumber > len(self.Profile.FileLinesList):
return True
SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if self.CurrentOffsetWithinLine >= SizeOfCurrentLine:
return True
return False
## Rewind() method
#
# Reset file data buffer to the initial state
#
# @param self The object pointer
# @param DestLine Optional new destination line number.
# @param DestOffset Optional new destination offset.
#
def Rewind(self, DestLine = 1, DestOffset = 0):
self.CurrentLineNumber = DestLine
self.CurrentOffsetWithinLine = DestOffset
## _UndoOneChar() method
#
# Go back one char in the file buffer
#
# @param self The object pointer
# @retval True Successfully go back one char
# @retval False Not able to go back one char as file beginning reached
#
def _UndoOneChar(self):
if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0:
return False
elif self.CurrentOffsetWithinLine == 0:
self.CurrentLineNumber -= 1
self.CurrentOffsetWithinLine = len(self._CurrentLine()) - 1
else:
self.CurrentOffsetWithinLine -= 1
return True
## _GetOneChar() method
#
# Move forward one char in the file buffer
#
# @param self The object pointer
#
def _GetOneChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
else:
self.CurrentOffsetWithinLine += 1
## _CurrentChar() method
#
# Get the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Current char
#
def _CurrentChar(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine]
## _NextChar() method
#
# Get the one char pass the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Next char
#
def _NextChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
return self.Profile.FileLinesList[self.CurrentLineNumber][0]
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]
## _SetCurrentCharValue() method
#
# Modify the value of current char
#
# @param self The object pointer
# @param Value The new value of current char
#
def _SetCurrentCharValue(self, Value):
self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value
## _CurrentLine() method
#
# Get the list that contains current line contents
#
# @param self The object pointer
# @retval List current line contents
#
def _CurrentLine(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1]
def _StringToList(self):
self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesList]
if not self.Profile.FileLinesList:
EdkLogger.error('FdfParser', FILE_READ_FAILURE, 'The file is empty!', File=self.FileName)
self.Profile.FileLinesList[-1].append(' ')
def _ReplaceFragment(self, StartPos, EndPos, Value = ' '):
if StartPos[0] == EndPos[0]:
Offset = StartPos[1]
while Offset <= EndPos[1]:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
return
Offset = StartPos[1]
while self.Profile.FileLinesList[StartPos[0]][Offset] not in CR_LB_SET:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
Line = StartPos[0]
while Line < EndPos[0]:
Offset = 0
while self.Profile.FileLinesList[Line][Offset] not in CR_LB_SET:
self.Profile.FileLinesList[Line][Offset] = Value
Offset += 1
Line += 1
Offset = 0
while Offset <= EndPos[1]:
self.Profile.FileLinesList[EndPos[0]][Offset] = Value
Offset += 1
def _SetMacroValue(self, Macro, Value):
if not self._CurSection:
return
MacroDict = {}
if not self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]:
self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]] = MacroDict
else:
MacroDict = self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]
MacroDict[Macro] = Value
def _GetMacroValue(self, Macro):
# Highest priority
if Macro in GlobalData.gCommandLineDefines:
return GlobalData.gCommandLineDefines[Macro]
if Macro in GlobalData.gGlobalDefines:
return GlobalData.gGlobalDefines[Macro]
if self._CurSection:
MacroDict = self._MacroDict[
self._CurSection[0],
self._CurSection[1],
self._CurSection[2]
]
if MacroDict and Macro in MacroDict:
return MacroDict[Macro]
# Lowest priority
if Macro in GlobalData.gPlatformDefines:
return GlobalData.gPlatformDefines[Macro]
return None
def _SectionHeaderParser(self, Section):
# [Defines]
# [FD.UiName]: use dummy instead if UI name is optional
# [FV.UiName]
# [Capsule.UiName]
# [Rule]: don't take rule section into account, macro is not allowed in this section
# [OptionRom.DriverName]
self._CurSection = []
Section = Section.strip()[1:-1].upper().replace(' ', '').strip(TAB_SPLIT)
ItemList = Section.split(TAB_SPLIT)
Item = ItemList[0]
if Item == '' or Item == 'RULE':
return
if Item == TAB_COMMON_DEFINES.upper():
self._CurSection = [TAB_COMMON, TAB_COMMON, TAB_COMMON]
elif len(ItemList) > 1:
self._CurSection = [ItemList[0], ItemList[1], TAB_COMMON]
elif len(ItemList) > 0:
self._CurSection = [ItemList[0], 'DUMMY', TAB_COMMON]
## PreprocessFile() method
#
# Preprocess file contents, replace comments with spaces.
# In the end, rewind the file buffer pointer to the beginning
# BUGBUG: No !include statement processing contained in this procedure
# !include statement should be expanded at the same FileLinesList[CurrentLineNumber - 1]
#
# @param self The object pointer
#
def PreprocessFile(self):
self.Rewind()
InComment = False
DoubleSlashComment = False
HashComment = False
# HashComment in quoted string " " is ignored.
InString = False
while not self._EndOfFile():
if self._CurrentChar() == T_CHAR_DOUBLE_QUOTE and not InComment:
InString = not InString
# meet new line, then no longer in a comment for // and '#'
if self._CurrentChar() == TAB_LINE_BREAK:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
if InComment and DoubleSlashComment:
InComment = False
DoubleSlashComment = False
if InComment and HashComment:
InComment = False
HashComment = False
# check for */ comment end
elif InComment and not DoubleSlashComment and not HashComment and self._CurrentChar() == TAB_STAR and self._NextChar() == TAB_BACK_SLASH:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
InComment = False
# set comments to spaces
elif InComment:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
# check for // comment
elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_BACK_SLASH and not self._EndOfLine():
InComment = True
DoubleSlashComment = True
# check for '#' comment
elif self._CurrentChar() == TAB_COMMENT_SPLIT and not self._EndOfLine() and not InString:
InComment = True
HashComment = True
# check for /* comment start
elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_STAR:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
InComment = True
else:
self._GetOneChar()
# restore from ListOfList to ListOfString
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
self.Rewind()
## PreprocessIncludeFile() method
#
# Preprocess file contents, replace !include statements with file contents.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessIncludeFile(self):
# nested include support
Processed = False
MacroDict = {}
while self._GetNextToken():
if self._Token == TAB_DEFINE:
if not self._GetNextToken():
raise Warning.Expected("Macro name", self.FileName, self.CurrentLineNumber)
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
MacroDict[Macro] = Value
elif self._Token == TAB_INCLUDE:
Processed = True
IncludeLine = self.CurrentLineNumber
IncludeOffset = self.CurrentOffsetWithinLine - len(TAB_INCLUDE)
if not self._GetNextToken():
raise Warning.Expected("include file name", self.FileName, self.CurrentLineNumber)
IncFileName = self._Token
PreIndex = 0
StartPos = IncFileName.find('$(', PreIndex)
EndPos = IncFileName.find(')', StartPos+2)
while StartPos != -1 and EndPos != -1:
Macro = IncFileName[StartPos+2: EndPos]
MacroVal = self._GetMacroValue(Macro)
if not MacroVal:
if Macro in MacroDict:
MacroVal = MacroDict[Macro]
if MacroVal is not None:
IncFileName = IncFileName.replace('$(' + Macro + ')', MacroVal, 1)
if MacroVal.find('$(') != -1:
PreIndex = StartPos
else:
PreIndex = StartPos + len(MacroVal)
else:
raise Warning("The Macro %s is not defined" %Macro, self.FileName, self.CurrentLineNumber)
StartPos = IncFileName.find('$(', PreIndex)
EndPos = IncFileName.find(')', StartPos+2)
IncludedFile = NormPath(IncFileName)
#
# First search the include file under the same directory as FDF file
#
IncludedFile1 = PathClass(IncludedFile, os.path.dirname(self.FileName))
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
#
# Then search the include file under the same directory as DSC file
#
PlatformDir = ''
if GenFdsGlobalVariable.ActivePlatform:
PlatformDir = GenFdsGlobalVariable.ActivePlatform.Dir
elif GlobalData.gActivePlatform:
PlatformDir = GlobalData.gActivePlatform.MetaFile.Dir
IncludedFile1 = PathClass(IncludedFile, PlatformDir)
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
#
# Also search file under the WORKSPACE directory
#
IncludedFile1 = PathClass(IncludedFile, GlobalData.gWorkspace)
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
raise Warning("The include file does not exist under below directories: \n%s\n%s\n%s\n"%(os.path.dirname(self.FileName), PlatformDir, GlobalData.gWorkspace),
self.FileName, self.CurrentLineNumber)
if not IsValidInclude (IncludedFile1.Path, self.CurrentLineNumber):
raise Warning("The include file {0} is causing a include loop.\n".format (IncludedFile1.Path), self.FileName, self.CurrentLineNumber)
IncFileProfile = IncludeFileProfile(IncludedFile1.Path)
CurrentLine = self.CurrentLineNumber
CurrentOffset = self.CurrentOffsetWithinLine
# list index of the insertion, note that line number is 'CurrentLine + 1'
InsertAtLine = CurrentLine
ParentProfile = GetParentAtLine (CurrentLine)
if ParentProfile is not None:
ParentProfile.IncludeFileList.insert(0, IncFileProfile)
IncFileProfile.Level = ParentProfile.Level + 1
IncFileProfile.InsertStartLineNumber = InsertAtLine + 1
                # deal with remaining portions after "!include filename", if any exist.
if self._GetNextToken():
if self.CurrentLineNumber == CurrentLine:
RemainingLine = self._CurrentLine()[CurrentOffset:]
self.Profile.FileLinesList.insert(self.CurrentLineNumber, RemainingLine)
IncFileProfile.InsertAdjust += 1
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
for Line in IncFileProfile.FileLinesList:
self.Profile.FileLinesList.insert(InsertAtLine, Line)
self.CurrentLineNumber += 1
InsertAtLine += 1
                # reverse-sorted to better determine which file an error is in
AllIncludeFileList.insert(0, IncFileProfile)
# comment out the processed include file statement
TempList = list(self.Profile.FileLinesList[IncludeLine - 1])
TempList.insert(IncludeOffset, TAB_COMMENT_SPLIT)
self.Profile.FileLinesList[IncludeLine - 1] = ''.join(TempList)
if Processed: # Nested and back-to-back support
self.Rewind(DestLine = IncFileProfile.InsertStartLineNumber - 1)
Processed = False
# Preprocess done.
self.Rewind()
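    # Illustrative note (added comment, the paths are examples only): the
    # statements rewritten above look like
    #
    #   !include Common/Platform.fdf
    #   !include $(WORKSPACE)/Override.fdf
    #
    # where $(MACRO) references are resolved before the file is searched
    # relative to the FDF directory, the DSC directory, and finally WORKSPACE.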
@staticmethod
def _GetIfListCurrentItemStat(IfList):
if len(IfList) == 0:
return True
for Item in IfList:
if Item[1] == False:
return False
return True
## PreprocessConditionalStatement() method
#
# Preprocess conditional statement.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessConditionalStatement(self):
# IfList is a stack of if branches with elements of list [Pos, CondSatisfied, BranchDetermined]
IfList = []
RegionLayoutLine = 0
ReplacedLine = -1
while self._GetNextToken():
# Determine section name and the location dependent macro
if self._GetIfListCurrentItemStat(IfList):
if self._Token.startswith(TAB_SECTION_START):
Header = self._Token
if not self._Token.endswith(TAB_SECTION_END):
self._SkipToToken(TAB_SECTION_END)
Header += self._SkippedChars
if Header.find('$(') != -1:
raise Warning("macro cannot be used in section header", self.FileName, self.CurrentLineNumber)
self._SectionHeaderParser(Header)
continue
# Replace macros except in RULE section or out of section
elif self._CurSection and ReplacedLine != self.CurrentLineNumber:
ReplacedLine = self.CurrentLineNumber
self._UndoToken()
CurLine = self.Profile.FileLinesList[ReplacedLine - 1]
PreIndex = 0
StartPos = CurLine.find('$(', PreIndex)
EndPos = CurLine.find(')', StartPos+2)
while StartPos != -1 and EndPos != -1 and self._Token not in {TAB_IF_DEF, TAB_IF_N_DEF, TAB_IF, TAB_ELSE_IF}:
MacroName = CurLine[StartPos+2: EndPos]
MacroValue = self._GetMacroValue(MacroName)
if MacroValue is not None:
CurLine = CurLine.replace('$(' + MacroName + ')', MacroValue, 1)
if MacroValue.find('$(') != -1:
PreIndex = StartPos
else:
PreIndex = StartPos + len(MacroValue)
else:
PreIndex = EndPos + 1
StartPos = CurLine.find('$(', PreIndex)
EndPos = CurLine.find(')', StartPos+2)
self.Profile.FileLinesList[ReplacedLine - 1] = CurLine
continue
if self._Token == TAB_DEFINE:
if self._GetIfListCurrentItemStat(IfList):
if not self._CurSection:
raise Warning("macro cannot be defined in Rule section or out of section", self.FileName, self.CurrentLineNumber)
DefineLine = self.CurrentLineNumber - 1
DefineOffset = self.CurrentOffsetWithinLine - len(TAB_DEFINE)
if not self._GetNextToken():
raise Warning.Expected("Macro name", self.FileName, self.CurrentLineNumber)
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
self._SetMacroValue(Macro, Value)
self._WipeOffArea.append(((DefineLine, DefineOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token == 'SET':
if not self._GetIfListCurrentItemStat(IfList):
continue
SetLine = self.CurrentLineNumber - 1
SetOffset = self.CurrentOffsetWithinLine - len('SET')
PcdPair = self._GetNextPcdSettings()
PcdName = "%s.%s" % (PcdPair[1], PcdPair[0])
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
Value = self._EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
self._PcdDict[PcdName] = Value
self.Profile.PcdDict[PcdPair] = Value
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
self._WipeOffArea.append(((SetLine, SetOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token in {TAB_IF_DEF, TAB_IF_N_DEF, TAB_IF}:
IfStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self._Token))
IfList.append([IfStartPos, None, None])
CondLabel = self._Token
Expression = self._GetExpression()
if CondLabel == TAB_IF:
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
else:
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'in')
if CondLabel == TAB_IF_N_DEF:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
if ConditionSatisfied:
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token in {TAB_ELSE_IF, TAB_ELSE}:
ElseStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self._Token))
if len(IfList) <= 0:
raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
IfList[-1] = [ElseStartPos, False, True]
self._WipeOffArea.append((ElseStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self._WipeOffArea.append((IfList[-1][0], ElseStartPos))
IfList[-1] = [ElseStartPos, True, IfList[-1][2]]
if self._Token == TAB_ELSE_IF:
Expression = self._GetExpression()
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
IfList[-1] = [IfList[-1][0], ConditionSatisfied, IfList[-1][2]]
if IfList[-1][1]:
if IfList[-1][2]:
IfList[-1][1] = False
else:
IfList[-1][2] = True
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token == '!endif':
if len(IfList) <= 0:
raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
self._WipeOffArea.append(((self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len('!endif')), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
IfList.pop()
elif not IfList: # Don't use PCDs inside conditional directive
if self.CurrentLineNumber <= RegionLayoutLine:
# Don't try the same line twice
continue
SetPcd = ShortcutPcdPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if SetPcd:
self._PcdDict[SetPcd.group('name')] = SetPcd.group('value')
RegionLayoutLine = self.CurrentLineNumber
continue
RegionSize = RegionSizePattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if not RegionSize:
RegionLayoutLine = self.CurrentLineNumber
continue
RegionSizeGuid = RegionSizeGuidPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber])
if not RegionSizeGuid:
RegionLayoutLine = self.CurrentLineNumber + 1
continue
self._PcdDict[RegionSizeGuid.group('base')] = RegionSize.group('base')
self._PcdDict[RegionSizeGuid.group('size')] = RegionSize.group('size')
RegionLayoutLine = self.CurrentLineNumber + 1
if IfList:
raise Warning("Missing !endif", self.FileName, self.CurrentLineNumber)
self.Rewind()
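    # Illustrative note (added comment, the macro name is an example only): the
    # directives handled above follow the usual FDF form
    #
    #   !ifdef $(SECURE_BOOT_ENABLE)
    #     ...
    #   !else
    #     ...
    #   !endif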
def _CollectMacroPcd(self):
MacroDict = {}
# PCD macro
MacroDict.update(GlobalData.gPlatformPcds)
MacroDict.update(self._PcdDict)
# Lowest priority
MacroDict.update(GlobalData.gPlatformDefines)
if self._CurSection:
# Defines macro
ScopeMacro = self._MacroDict[TAB_COMMON, TAB_COMMON, TAB_COMMON]
if ScopeMacro:
MacroDict.update(ScopeMacro)
# Section macro
ScopeMacro = self._MacroDict[
self._CurSection[0],
self._CurSection[1],
self._CurSection[2]
]
if ScopeMacro:
MacroDict.update(ScopeMacro)
MacroDict.update(GlobalData.gGlobalDefines)
MacroDict.update(GlobalData.gCommandLineDefines)
for Item in GlobalData.BuildOptionPcd:
if isinstance(Item, tuple):
continue
PcdName, TmpValue = Item.split(TAB_EQUAL_SPLIT)
TmpValue = BuildOptionValue(TmpValue, {})
MacroDict[PcdName.strip()] = TmpValue
# Highest priority
return MacroDict
def _EvaluateConditional(self, Expression, Line, Op = None, Value = None):
MacroPcdDict = self._CollectMacroPcd()
if Op == 'eval':
try:
if Value:
return ValueExpression(Expression, MacroPcdDict)(True)
else:
return ValueExpression(Expression, MacroPcdDict)()
except WrnExpression as Excpt:
#
# Catch expression evaluation warning here. We need to report
# the precise number of line and return the evaluation result
#
EdkLogger.warn('Parser', "Suspicious expression: %s" % str(Excpt),
File=self.FileName, ExtraData=self._CurrentLine(),
Line=Line)
return Excpt.result
except Exception as Excpt:
if hasattr(Excpt, 'Pcd'):
if Excpt.Pcd in GlobalData.gPlatformOtherPcds:
Info = GlobalData.gPlatformOtherPcds[Excpt.Pcd]
raise Warning("Cannot use this PCD (%s) in an expression as"
" it must be defined in a [PcdsFixedAtBuild] or [PcdsFeatureFlag] section"
" of the DSC file (%s), and it is currently defined in this section:"
" %s, line #: %d." % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE'], Info[0], Info[1]),
self.FileName, Line)
else:
raise Warning("PCD (%s) is not defined in DSC file (%s)" % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE']),
self.FileName, Line)
else:
raise Warning(str(Excpt), self.FileName, Line)
else:
if Expression.startswith('$(') and Expression[-1] == ')':
Expression = Expression[2:-1]
return Expression in MacroPcdDict
## _IsToken() method
#
    # Check whether the input string is found from the current char position onward
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def _IsToken(self, String, IgnoreCase = False):
self._SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(String.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
## _IsKeyword() method
#
    # Check whether the input keyword is found from the current char position onward, whole word only!
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @param Keyword The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def _IsKeyword(self, KeyWord, IgnoreCase = False):
self._SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(KeyWord.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(KeyWord)
if index == 0:
followingChar = self._CurrentLine()[self.CurrentOffsetWithinLine + len(KeyWord)]
if not str(followingChar).isspace() and followingChar not in SEPARATORS:
return False
self.CurrentOffsetWithinLine += len(KeyWord)
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
def _GetExpression(self):
Line = self.Profile.FileLinesList[self.CurrentLineNumber - 1]
Index = len(Line) - 1
while Line[Index] in CR_LB_SET:
Index -= 1
ExpressionString = self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:Index+1]
self.CurrentOffsetWithinLine += len(ExpressionString)
ExpressionString = ExpressionString.strip()
return ExpressionString
## _GetNextWord() method
#
# Get next C name from file lines
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a C name string, file buffer pointer moved forward
# @retval False Not able to find a C name string, file buffer pointer not changed
#
def _GetNextWord(self):
self._SkipWhiteSpace()
if self._EndOfFile():
return False
TempChar = self._CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_':
self._GetOneChar()
while not self._EndOfLine():
TempChar = self._CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-':
self._GetOneChar()
else:
break
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
def _GetNextPcdWord(self):
self._SkipWhiteSpace()
if self._EndOfFile():
return False
TempChar = self._CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_' or TempChar == TAB_SECTION_START or TempChar == TAB_SECTION_END:
self._GetOneChar()
while not self._EndOfLine():
TempChar = self._CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-' or TempChar == TAB_SECTION_START or TempChar == TAB_SECTION_END:
self._GetOneChar()
else:
break
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
## _GetNextToken() method
#
# Get next token unit before a separator
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a token unit, file buffer pointer moved forward
# @retval False Not able to find a token unit, file buffer pointer not changed
#
def _GetNextToken(self):
        # Skip leading spaces, if any exist.
self._SkipWhiteSpace()
if self._EndOfFile():
return False
# Record the token start position, the position of the first non-space char.
StartPos = self.CurrentOffsetWithinLine
StartLine = self.CurrentLineNumber
while StartLine == self.CurrentLineNumber:
TempChar = self._CurrentChar()
# Try to find the end char that is not a space and not in separator tuple.
# That is, when we got a space or any char in the tuple, we got the end of token.
if not str(TempChar).isspace() and TempChar not in SEPARATORS:
self._GetOneChar()
# if we happen to meet a separator as the first char, we must proceed to get it.
            # That is, we get a token that is a separator char. Normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPARATORS:
self._GetOneChar()
break
else:
break
# else:
# return False
EndPos = self.CurrentOffsetWithinLine
if self.CurrentLineNumber != StartLine:
EndPos = len(self.Profile.FileLinesList[StartLine-1])
self._Token = self.Profile.FileLinesList[StartLine-1][StartPos: EndPos]
if self._Token.lower() in {TAB_IF, TAB_END_IF, TAB_ELSE_IF, TAB_ELSE, TAB_IF_DEF, TAB_IF_N_DEF, TAB_ERROR, TAB_INCLUDE}:
self._Token = self._Token.lower()
if StartPos != self.CurrentOffsetWithinLine:
return True
else:
return False
## _GetNextGuid() method
#
# Get next token unit before a separator
# If found, the GUID string is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a registry format GUID, file buffer pointer moved forward
# @retval False Not able to find a registry format GUID, file buffer pointer not changed
#
def _GetNextGuid(self):
if not self._GetNextToken():
return False
if GlobalData.gGuidPattern.match(self._Token) is not None:
return True
else:
self._UndoToken()
return False
@staticmethod
def _Verify(Name, Value, Scope):
# value verification only applies to numeric values.
if Scope not in TAB_PCD_NUMERIC_TYPES:
return
ValueNumber = 0
try:
ValueNumber = int(Value, 0)
except:
EdkLogger.error("FdfParser", FORMAT_INVALID, "The value is not valid dec or hex number for %s." % Name)
if ValueNumber < 0:
EdkLogger.error("FdfParser", FORMAT_INVALID, "The value can't be set to negative value for %s." % Name)
if ValueNumber > MAX_VAL_TYPE[Scope]:
EdkLogger.error("FdfParser", FORMAT_INVALID, "Too large value for %s." % Name)
return True
## _UndoToken() method
#
# Go back one token unit in file buffer
#
# @param self The object pointer
#
def _UndoToken(self):
self._UndoOneChar()
while self._CurrentChar().isspace():
if not self._UndoOneChar():
self._GetOneChar()
return
StartPos = self.CurrentOffsetWithinLine
CurrentLine = self.CurrentLineNumber
while CurrentLine == self.CurrentLineNumber:
TempChar = self._CurrentChar()
# Try to find the end char that is not a space and not in separator tuple.
# That is, when we got a space or any char in the tuple, we got the end of token.
            if not str(TempChar).isspace() and TempChar not in SEPARATORS:
if not self._UndoOneChar():
return
# if we happen to meet a separator as the first char, we must proceed to get it.
# That is, we get a token that is a separator char. normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPARATORS:
return
else:
break
self._GetOneChar()
## _GetNextHexNumber() method
#
# Get next HEX data before a separator
# If found, the HEX data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a HEX data, file buffer pointer moved forward
# @retval False Not able to find a HEX data, file buffer pointer not changed
#
def _GetNextHexNumber(self):
if not self._GetNextToken():
return False
if GlobalData.gHexPatternAll.match(self._Token):
return True
else:
self._UndoToken()
return False
## _GetNextDecimalNumber() method
#
# Get next decimal data before a separator
# If found, the decimal data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a decimal data, file buffer pointer moved forward
# @retval False Not able to find a decimal data, file buffer pointer not changed
#
def _GetNextDecimalNumber(self):
if not self._GetNextToken():
return False
if self._Token.isdigit():
return True
else:
self._UndoToken()
return False
def _GetNextPcdSettings(self):
if not self._GetNextWord():
raise Warning.Expected("<PcdTokenSpaceCName>", self.FileName, self.CurrentLineNumber)
pcdTokenSpaceCName = self._Token
if not self._IsToken(TAB_SPLIT):
raise Warning.Expected(".", self.FileName, self.CurrentLineNumber)
if not self._GetNextWord():
raise Warning.Expected("<PcdCName>", self.FileName, self.CurrentLineNumber)
pcdCName = self._Token
Fields = []
while self._IsToken(TAB_SPLIT):
if not self._GetNextPcdWord():
raise Warning.Expected("Pcd Fields", self.FileName, self.CurrentLineNumber)
Fields.append(self._Token)
return (pcdCName, pcdTokenSpaceCName,TAB_SPLIT.join(Fields))
## _GetStringData() method
#
# Get string contents quoted in ""
    #   If found, the string data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a string data, file buffer pointer moved forward
# @retval False Not able to find a string data, file buffer pointer not changed
#
def _GetStringData(self):
QuoteToUse = None
if self._Token.startswith(T_CHAR_DOUBLE_QUOTE) or self._Token.startswith("L\""):
QuoteToUse = T_CHAR_DOUBLE_QUOTE
elif self._Token.startswith(T_CHAR_SINGLE_QUOTE) or self._Token.startswith("L\'"):
QuoteToUse = T_CHAR_SINGLE_QUOTE
else:
return False
self._UndoToken()
self._SkipToToken(QuoteToUse)
currentLineNumber = self.CurrentLineNumber
if not self._SkipToToken(QuoteToUse):
raise Warning(QuoteToUse, self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning(QuoteToUse, self.FileName, self.CurrentLineNumber)
self._Token = self._SkippedChars.rstrip(QuoteToUse)
return True
## _SkipToToken() method
#
# Search forward in file buffer for the string
# The skipped chars are put into self._SkippedChars
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find the string, file buffer pointer moved forward
# @retval False Not able to find the string, file buffer pointer not changed
#
def _SkipToToken(self, String, IgnoreCase = False):
StartPos = self.GetFileBufferPos()
self._SkippedChars = ""
while not self._EndOfFile():
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(String.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self._SkippedChars += String
return True
self._SkippedChars += str(self._CurrentChar())
self._GetOneChar()
self.SetFileBufferPos(StartPos)
self._SkippedChars = ""
return False
## GetFileBufferPos() method
#
# Return the tuple of current line and offset within the line
#
# @param self The object pointer
# @retval Tuple Line number and offset pair
#
def GetFileBufferPos(self):
return (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
## SetFileBufferPos() method
#
# Restore the file buffer position
#
# @param self The object pointer
# @param Pos The new file buffer position
#
def SetFileBufferPos(self, Pos):
(self.CurrentLineNumber, self.CurrentOffsetWithinLine) = Pos
## Preprocess() method
#
# Preprocess comment, conditional directive, include directive, replace macro.
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
def Preprocess(self):
self._StringToList()
self.PreprocessFile()
self.PreprocessIncludeFile()
self._StringToList()
self.PreprocessFile()
self.PreprocessConditionalStatement()
self._StringToList()
for Pos in self._WipeOffArea:
self._ReplaceFragment(Pos[0], Pos[1])
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
while self._GetDefines():
pass
## ParseFile() method
#
# Parse the file profile buffer to extract fd, fv ... information
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
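    # Illustrative usage sketch (hypothetical file name; in practice the GenFds tool
    # drives this parser):
    #   parser = FdfParser("Platform.fdf")
    #   parser.ParseFile()
    #   FdObjects = parser.Profile.FdDict
    #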
def ParseFile(self):
try:
self.Preprocess()
self._GetError()
#
# Keep processing sections of the FDF until no new sections or a syntax error is found
#
while self._GetFd() or self._GetFv() or self._GetFmp() or self._GetCapsule() or self._GetRule() or self._GetOptionRom():
pass
except Warning as X:
self._UndoToken()
#'\n\tGot Token: \"%s\" from File %s\n' % (self._Token, FileLineTuple[0]) + \
# At this point, the closest parent would be the included file itself
Profile = GetParentAtLine(X.OriginalLineNumber)
if Profile is not None:
X.Message += ' near line %d, column %d: %s' \
% (X.LineNumber, 0, Profile.FileLinesList[X.LineNumber-1])
else:
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
X.Message += ' near line %d, column %d: %s' \
% (FileLineTuple[1], self.CurrentOffsetWithinLine + 1, self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:].rstrip(TAB_LINE_BREAK).rstrip(T_CHAR_CR))
raise
## SectionParser() method
#
# Parse the file section info
# Exception will be raised if syntax error found
#
# @param self The object pointer
# @param section The section string
def SectionParser(self, section):
S = section.upper()
if not S.startswith("[DEFINES") and not S.startswith("[FD.") and not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM.") and not S.startswith('[FMPPAYLOAD.'):
            raise Warning("Unknown section or incorrect section order (the correct order is [DEFINES], [FD.], [FV.], [Capsule.], [Rule.], [OptionRom.], [FMPPAYLOAD.])", self.FileName, self.CurrentLineNumber)
## _GetDefines() method
#
# Get Defines section contents and store its data into AllMacrosList
#
# @param self The object pointer
# @retval True Successfully find a Defines
# @retval False Not able to find a Defines
#
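    # Illustrative [Defines] body handled here (hypothetical names):
    #   [Defines]
    #     SET gHypotheticalTokenSpaceGuid.PcdExample = 0x1
    #     HYPOTHETICAL_MACRO = SomeValue
    #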
def _GetDefines(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[DEFINES"):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[DEFINES", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[DEFINES", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
while self._GetNextWord():
# handle the SET statement
if self._Token == 'SET':
self._UndoToken()
self._GetSetStatement(None)
continue
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.startswith(TAB_SECTION_START):
raise Warning.Expected("MACRO value", self.FileName, self.CurrentLineNumber)
Value = self._Token
return False
##_GetError() method
def _GetError(self):
        # Save the current position so it can be restored after scanning for !error statements
CurrentLine = self.CurrentLineNumber
CurrentOffset = self.CurrentOffsetWithinLine
while self._GetNextToken():
if self._Token == TAB_ERROR:
EdkLogger.error('FdfParser', ERROR_STATEMENT, self._CurrentLine().replace(TAB_ERROR, '', 1), File=self.FileName, Line=self.CurrentLineNumber)
self.CurrentLineNumber = CurrentLine
self.CurrentOffsetWithinLine = CurrentOffset
## _GetFd() method
#
# Get FD section contents and store its data into FD dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FD
# @retval False Not able to find a FD
#
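    # Illustrative [FD.] section sketch parsed by this method (hypothetical names/values):
    #   [FD.HypotheticalFd]
    #     BaseAddress   = 0xFF800000
    #     Size          = 0x00800000
    #     ErasePolarity = 1
    #     BlockSize     = 0x1000
    #     0x000000|0x100000
    #     FV = HYPOTHETICALFV
    #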
def _GetFd(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FD."):
if not S.startswith("[FV.") and not S.startswith('[FMPPAYLOAD.') and not S.startswith("[CAPSULE.") \
and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section", self.FileName, self.CurrentLineNumber)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[FD.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[FD.]", self.FileName, self.CurrentLineNumber)
FdName = self._GetUiName()
if FdName == "":
if len (self.Profile.FdDict) == 0:
FdName = GenFdsGlobalVariable.PlatformName
if FdName == "" and GlobalData.gActivePlatform:
FdName = GlobalData.gActivePlatform.PlatformName
self.Profile.FdNameNotSet = True
else:
raise Warning.Expected("FdName in [FD.] section", self.FileName, self.CurrentLineNumber)
self.CurrentFdName = FdName.upper()
if self.CurrentFdName in self.Profile.FdDict:
            raise Warning("Duplicate FD name found", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
FdObj = FD()
FdObj.FdUiName = self.CurrentFdName
self.Profile.FdDict[self.CurrentFdName] = FdObj
if len (self.Profile.FdDict) > 1 and self.Profile.FdNameNotSet:
            raise Warning.Expected("all FDs to have a name", self.FileName, self.CurrentLineNumber)
Status = self._GetCreateFile(FdObj)
if not Status:
raise Warning("FD name error", self.FileName, self.CurrentLineNumber)
while self._GetTokenStatements(FdObj):
pass
for Attr in ("BaseAddress", "Size", "ErasePolarity"):
if getattr(FdObj, Attr) is None:
self._GetNextToken()
raise Warning("Keyword %s missing" % Attr, self.FileName, self.CurrentLineNumber)
if not FdObj.BlockSizeList:
FdObj.BlockSizeList.append((1, FdObj.Size, None))
self._GetDefineStatements(FdObj)
self._GetSetStatements(FdObj)
if not self._GetRegionLayout(FdObj):
raise Warning.Expected("region layout", self.FileName, self.CurrentLineNumber)
while self._GetRegionLayout(FdObj):
pass
return True
## _GetUiName() method
#
# Return the UI name of a section
#
# @param self The object pointer
# @retval FdName UI name
#
def _GetUiName(self):
Name = ""
if self._GetNextWord():
Name = self._Token
return Name
## _GetCreateFile() method
#
# Return the output file name of object
#
# @param self The object pointer
# @param Obj object whose data will be stored in file
# @retval FdName UI name
#
def _GetCreateFile(self, Obj):
if self._IsKeyword("CREATE_FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("file name", self.FileName, self.CurrentLineNumber)
FileName = self._Token
Obj.CreateFileName = FileName
return True
    def SetPcdLocalation(self, pcdpair):
self.Profile.PcdLocalDict[pcdpair] = (self.Profile.FileName,self.CurrentLineNumber)
## _GetTokenStatements() method
#
# Get token statements
#
# @param self The object pointer
# @param Obj for whom token statement is got
#
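    # Illustrative statements consumed here (hypothetical PCD names); an optional
    # "|PcdName" suffix also records the value into self.Profile.PcdDict:
    #   BaseAddress   = 0xFF800000|gHypotheticalTokenSpaceGuid.PcdFlashBaseAddress
    #   Size          = 0x00800000|gHypotheticalTokenSpaceGuid.PcdFlashSize
    #   ErasePolarity = 1
    #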
def _GetTokenStatements(self, Obj):
if self._IsKeyword("BaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex base address", self.FileName, self.CurrentLineNumber)
Obj.BaseAddress = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
pcdPair = self._GetNextPcdSettings()
Obj.BaseAddressPcd = pcdPair
self.Profile.PcdDict[pcdPair] = Obj.BaseAddress
self.SetPcdLocalation(pcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
return True
if self._IsKeyword("Size"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex size", self.FileName, self.CurrentLineNumber)
Size = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
pcdPair = self._GetNextPcdSettings()
Obj.SizePcd = pcdPair
self.Profile.PcdDict[pcdPair] = Size
self.SetPcdLocalation(pcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
Obj.Size = int(Size, 0)
return True
if self._IsKeyword("ErasePolarity"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Erase Polarity", self.FileName, self.CurrentLineNumber)
            if self._Token not in {"1", "0"}:
raise Warning.Expected("1 or 0 Erase Polarity", self.FileName, self.CurrentLineNumber)
Obj.ErasePolarity = self._Token
return True
return self._GetBlockStatements(Obj)
## _GetAddressStatements() method
#
# Get address statements
#
# @param self The object pointer
# @param Obj for whom address statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetAddressStatements(self, Obj):
if self._IsKeyword("BsBaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("address", self.FileName, self.CurrentLineNumber)
BsAddress = int(self._Token, 0)
Obj.BsBaseAddress = BsAddress
if self._IsKeyword("RtBaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("address", self.FileName, self.CurrentLineNumber)
RtAddress = int(self._Token, 0)
Obj.RtBaseAddress = RtAddress
## _GetBlockStatements() method
#
# Get block statements
#
# @param self The object pointer
# @param Obj for whom block statement is got
#
def _GetBlockStatements(self, Obj):
IsBlock = False
while self._GetBlockStatement(Obj):
IsBlock = True
Item = Obj.BlockSizeList[-1]
if Item[0] is None or Item[1] is None:
raise Warning.Expected("block statement", self.FileName, self.CurrentLineNumber)
return IsBlock
## _GetBlockStatement() method
#
# Get block statement
#
# @param self The object pointer
# @param Obj for whom block statement is got
# @retval True Successfully find
# @retval False Not able to find
#
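    # Illustrative statement pair consumed here (hypothetical PCD name):
    #   BlockSize = 0x1000|gHypotheticalTokenSpaceGuid.PcdFlashBlockSize
    #   NumBlocks = 0x800
    #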
def _GetBlockStatement(self, Obj):
if not self._IsKeyword("BlockSize"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber() and not self._GetNextDecimalNumber():
raise Warning.Expected("Hex or Integer block size", self.FileName, self.CurrentLineNumber)
BlockSize = self._Token
BlockSizePcd = None
if self._IsToken(TAB_VALUE_SPLIT):
PcdPair = self._GetNextPcdSettings()
BlockSizePcd = PcdPair
self.Profile.PcdDict[PcdPair] = BlockSize
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
BlockSize = int(BlockSize, 0)
BlockNumber = None
if self._IsKeyword("NumBlocks"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("block numbers", self.FileName, self.CurrentLineNumber)
BlockNumber = int(self._Token, 0)
Obj.BlockSizeList.append((BlockSize, BlockNumber, BlockSizePcd))
return True
## _GetDefineStatements() method
#
# Get define statements
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetDefineStatements(self, Obj):
while self._GetDefineStatement(Obj):
pass
## _GetDefineStatement() method
#
# Get define statement
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetDefineStatement(self, Obj):
if self._IsKeyword(TAB_DEFINE):
self._GetNextToken()
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
Value = self._Token
Macro = '$(' + Macro + ')'
Obj.DefineVarDict[Macro] = Value
return True
return False
## _GetSetStatements() method
#
# Get set statements
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetSetStatements(self, Obj):
while self._GetSetStatement(Obj):
pass
## _GetSetStatement() method
#
# Get set statement
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetSetStatement(self, Obj):
if self._IsKeyword("SET"):
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
Value = self._EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
if Obj:
Obj.SetVarDict[PcdPair] = Value
self.Profile.PcdDict[PcdPair] = Value
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
return True
return False
## _CalcRegionExpr(self)
#
# Calculate expression for offset or size of a region
#
# @return: None if invalid expression
# Calculated number if successfully
#
def _CalcRegionExpr(self):
StartPos = self.GetFileBufferPos()
Expr = ''
PairCount = 0
while not self._EndOfFile():
CurCh = self._CurrentChar()
if CurCh == '(':
PairCount += 1
elif CurCh == ')':
PairCount -= 1
if CurCh in '|\r\n' and PairCount == 0:
break
Expr += CurCh
self._GetOneChar()
try:
return int(
ValueExpression(Expr,
self._CollectMacroPcd()
)(True), 0)
except Exception:
self.SetFileBufferPos(StartPos)
return None
## _GetRegionLayout() method
#
# Get region layout for FD
#
# @param self The object pointer
# @param theFd for whom region is got
# @retval True Successfully find
# @retval False Not able to find
#
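    # Illustrative region layout consumed here (hypothetical names): an "Offset|Size"
    # expression, an optional PCD pair, then an optional region type:
    #   0x000000|0x040000
    #   gHypotheticalTokenSpaceGuid.PcdRegionOffset|gHypotheticalTokenSpaceGuid.PcdRegionSize
    #   FV = HYPOTHETICALFV
    #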
def _GetRegionLayout(self, theFd):
Offset = self._CalcRegionExpr()
if Offset is None:
return False
RegionObj = Region()
RegionObj.Offset = Offset
theFd.RegionList.append(RegionObj)
if not self._IsToken(TAB_VALUE_SPLIT):
raise Warning.Expected("'|'", self.FileName, self.CurrentLineNumber)
Size = self._CalcRegionExpr()
if Size is None:
raise Warning.Expected("Region Size", self.FileName, self.CurrentLineNumber)
RegionObj.Size = Size
if not self._GetNextWord():
return True
        if self._Token not in {"SET", BINARY_FILE_TYPE_FV, "FILE", "DATA", "CAPSULE", "INF"}:
#
# If next token is a word which is not a valid FV type, it might be part of [PcdOffset[|PcdSize]]
# Or it might be next region's offset described by an expression which starts with a PCD.
# PcdOffset[|PcdSize] or OffsetPcdExpression|Size
#
self._UndoToken()
IsRegionPcd = (RegionSizeGuidPattern.match(self._CurrentLine()[self.CurrentOffsetWithinLine:]) or
RegionOffsetPcdPattern.match(self._CurrentLine()[self.CurrentOffsetWithinLine:]))
if IsRegionPcd:
RegionObj.PcdOffset = self._GetNextPcdSettings()
self.Profile.PcdDict[RegionObj.PcdOffset] = "0x%08X" % (RegionObj.Offset + int(theFd.BaseAddress, 0))
self.SetPcdLocalation(RegionObj.PcdOffset)
self._PcdDict['%s.%s' % (RegionObj.PcdOffset[1], RegionObj.PcdOffset[0])] = "0x%x" % RegionObj.Offset
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdOffset] = FileLineTuple
if self._IsToken(TAB_VALUE_SPLIT):
RegionObj.PcdSize = self._GetNextPcdSettings()
self.Profile.PcdDict[RegionObj.PcdSize] = "0x%08X" % RegionObj.Size
self.SetPcdLocalation(RegionObj.PcdSize)
self._PcdDict['%s.%s' % (RegionObj.PcdSize[1], RegionObj.PcdSize[0])] = "0x%x" % RegionObj.Size
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdSize] = FileLineTuple
if not self._GetNextWord():
return True
if self._Token == "SET":
self._UndoToken()
self._GetSetStatements(RegionObj)
if not self._GetNextWord():
return True
elif self._Token == BINARY_FILE_TYPE_FV:
self._UndoToken()
self._GetRegionFvType(RegionObj)
elif self._Token == "CAPSULE":
self._UndoToken()
self._GetRegionCapType(RegionObj)
elif self._Token == "FILE":
self._UndoToken()
self._GetRegionFileType(RegionObj)
elif self._Token == "INF":
self._UndoToken()
RegionObj.RegionType = "INF"
while self._IsKeyword("INF"):
self._UndoToken()
ffsInf = self._ParseInfStatement()
if not ffsInf:
break
RegionObj.RegionDataList.append(ffsInf)
elif self._Token == "DATA":
self._UndoToken()
self._GetRegionDataType(RegionObj)
else:
self._UndoToken()
if self._GetRegionLayout(theFd):
return True
            raise Warning("A valid region type was not found. "
                          "Valid types are [SET, FV, CAPSULE, FILE, DATA, INF].",
                          self.FileName, self.CurrentLineNumber)
return True
## _GetRegionFvType() method
#
# Get region fv data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionFvType(self, RegionObj):
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
raise Warning.Expected("'FV'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = BINARY_FILE_TYPE_FV
RegionObj.RegionDataList.append((self._Token).upper())
while self._IsKeyword(BINARY_FILE_TYPE_FV):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append((self._Token).upper())
## _GetRegionCapType() method
#
# Get region capsule data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionCapType(self, RegionObj):
if not self._IsKeyword("CAPSULE"):
raise Warning.Expected("'CAPSULE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "CAPSULE"
RegionObj.RegionDataList.append(self._Token)
while self._IsKeyword("CAPSULE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self._Token)
## _GetRegionFileType() method
#
# Get region file data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionFileType(self, RegionObj):
if not self._IsKeyword("FILE"):
raise Warning.Expected("'FILE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FILE"
RegionObj.RegionDataList.append(self._Token)
while self._IsKeyword("FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FILE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self._Token)
## _GetRegionDataType() method
#
# Get region array data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
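    # Illustrative DATA region consumed here (arbitrary example bytes):
    #   DATA = {
    #     0x5A, 0xA5, 0x00, 0xFF
    #   }
    #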
def _GetRegionDataType(self, RegionObj):
if not self._IsKeyword("DATA"):
raise Warning.Expected("Region Data type", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
AllString = self._Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + TAB_COMMA_SPLIT
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + TAB_COMMA_SPLIT
# byte value array
if len (self._Token) <= 4:
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
RegionObj.RegionType = "DATA"
RegionObj.RegionDataList.append(DataString)
while self._IsKeyword("DATA"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
AllString = self._Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + TAB_COMMA_SPLIT
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + TAB_COMMA_SPLIT
# byte value array
if len (self._Token) <= 4:
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
RegionObj.RegionDataList.append(DataString)
## _GetFv() method
#
# Get FV section contents and store its data into FV dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FV
# @retval False Not able to find a FV
#
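    # Illustrative [FV.] section sketch parsed by this method (hypothetical names):
    #   [FV.HYPOTHETICALFV]
    #     FvAlignment    = 16
    #     ERASE_POLARITY = 1
    #     MEMORY_MAPPED  = TRUE
    #     INF HypotheticalPkg/HypotheticalDriver/HypotheticalDriver.inf
    #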
def _GetFv(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FV."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[FV.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("Unknown Keyword '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
FvName = self._GetUiName()
self.CurrentFvName = FvName.upper()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
FvObj = FV(Name=self.CurrentFvName)
self.Profile.FvDict[self.CurrentFvName] = FvObj
Status = self._GetCreateFile(FvObj)
if not Status:
raise Warning("FV name error", self.FileName, self.CurrentLineNumber)
self._GetDefineStatements(FvObj)
self._GetAddressStatements(FvObj)
while True:
self._GetSetStatements(FvObj)
if not (self._GetBlockStatement(FvObj) or self._GetFvBaseAddress(FvObj) or
self._GetFvForceRebase(FvObj) or self._GetFvAlignment(FvObj) or
self._GetFvAttributes(FvObj) or self._GetFvNameGuid(FvObj) or
self._GetFvExtEntryStatement(FvObj) or self._GetFvNameString(FvObj)):
break
if FvObj.FvNameString == 'TRUE' and not FvObj.FvNameGuid:
raise Warning("FvNameString found but FvNameGuid was not found", self.FileName, self.CurrentLineNumber)
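        # Up to two APRIORI sections may appear in one FV (typically one PEI and one
        # DXE list), so the parse is attempted twice before the INF/FILE statements.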
self._GetAprioriSection(FvObj)
self._GetAprioriSection(FvObj)
while True:
isInf = self._GetInfStatement(FvObj)
isFile = self._GetFileStatement(FvObj)
if not isInf and not isFile:
break
return True
## _GetFvAlignment() method
#
# Get alignment for FV
#
# @param self The object pointer
# @param Obj for whom alignment is got
    #   @retval True        Successfully find an alignment statement
    #   @retval False       Not able to find an alignment statement
#
def _GetFvAlignment(self, Obj):
if not self._IsKeyword("FvAlignment"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("alignment value", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {"1", "2", "4", "8", "16", "32", "64", "128", "256", "512", \
"1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", \
"1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", \
"1G", "2G"}:
raise Warning("Unknown alignment value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
Obj.FvAlignment = self._Token
return True
## _GetFvBaseAddress() method
#
# Get BaseAddress for FV
#
# @param self The object pointer
# @param Obj for whom FvBaseAddress is got
# @retval True Successfully find a FvBaseAddress statement
# @retval False Not able to find a FvBaseAddress statement
#
def _GetFvBaseAddress(self, Obj):
if not self._IsKeyword("FvBaseAddress"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV base address value", self.FileName, self.CurrentLineNumber)
if not BaseAddrValuePattern.match(self._Token.upper()):
raise Warning("Unknown FV base address value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
Obj.FvBaseAddress = self._Token
return True
## _GetFvForceRebase() method
#
# Get FvForceRebase for FV
#
# @param self The object pointer
# @param Obj for whom FvForceRebase is got
# @retval True Successfully find a FvForceRebase statement
# @retval False Not able to find a FvForceRebase statement
#
def _GetFvForceRebase(self, Obj):
if not self._IsKeyword("FvForceRebase"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FvForceRebase value", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {"TRUE", "FALSE", "0", "0X0", "0X00", "1", "0X1", "0X01"}:
raise Warning("Unknown FvForceRebase value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token.upper() in {"TRUE", "1", "0X1", "0X01"}:
Obj.FvForceRebase = True
elif self._Token.upper() in {"FALSE", "0", "0X0", "0X00"}:
Obj.FvForceRebase = False
else:
Obj.FvForceRebase = None
return True
## _GetFvAttributes() method
#
# Get attributes for FV
#
# @param self The object pointer
# @param Obj for whom attribute is got
# @retval None
#
def _GetFvAttributes(self, FvObj):
IsWordToken = False
while self._GetNextWord():
IsWordToken = True
name = self._Token
if name not in {"ERASE_POLARITY", "MEMORY_MAPPED", \
"STICKY_WRITE", "LOCK_CAP", "LOCK_STATUS", "WRITE_ENABLED_CAP", \
"WRITE_DISABLED_CAP", "WRITE_STATUS", "READ_ENABLED_CAP", \
"READ_DISABLED_CAP", "READ_STATUS", "READ_LOCK_CAP", \
"READ_LOCK_STATUS", "WRITE_LOCK_CAP", "WRITE_LOCK_STATUS", \
"WRITE_POLICY_RELIABLE", "WEAK_ALIGNMENT", "FvUsedSizeEnable"}:
self._UndoToken()
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.upper() not in {"TRUE", "FALSE", "1", "0"}:
raise Warning.Expected("TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
FvObj.FvAttributeDict[name] = self._Token
return IsWordToken
## _GetFvNameGuid() method
#
# Get FV GUID for FV
#
# @param self The object pointer
# @param Obj for whom GUID is got
# @retval None
#
def _GetFvNameGuid(self, FvObj):
if not self._IsKeyword("FvNameGuid"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
raise Warning.Expected("GUID value", self.FileName, self.CurrentLineNumber)
FvObj.FvNameGuid = self._Token
return True
def _GetFvNameString(self, FvObj):
if not self._IsKeyword("FvNameString"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.upper() not in {'TRUE', 'FALSE'}:
raise Warning.Expected("TRUE or FALSE for FvNameString", self.FileName, self.CurrentLineNumber)
FvObj.FvNameString = self._Token
return True
def _GetFvExtEntryStatement(self, FvObj):
if not (self._IsKeyword("FV_EXT_ENTRY") or self._IsKeyword("FV_EXT_ENTRY_TYPE")):
return False
if not self._IsKeyword ("TYPE"):
raise Warning.Expected("'TYPE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber() and not self._GetNextDecimalNumber():
            raise Warning.Expected("Hex FV extension entry type value", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryTypeValue.append(self._Token)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._IsKeyword("FILE") and not self._IsKeyword("DATA"):
raise Warning.Expected("'FILE' or 'DATA'", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryType.append(self._Token)
if self._Token == 'DATA':
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString = self._Token
DataString += TAB_COMMA_SPLIT
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
FvObj.FvExtEntryData.append(DataString)
if self._Token == 'FILE':
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
                raise Warning.Expected("FV Extension Entry file path", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryData.append(self._Token)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
return True
## _GetAprioriSection() method
#
# Get token statements
#
# @param self The object pointer
# @param FvObj for whom apriori is got
# @retval True Successfully find apriori statement
# @retval False Not able to find apriori statement
#
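    # Illustrative APRIORI block consumed here (hypothetical INF path):
    #   APRIORI PEI {
    #     INF HypotheticalPkg/HypotheticalPeim/HypotheticalPeim.inf
    #   }
    #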
def _GetAprioriSection(self, FvObj):
if not self._IsKeyword("APRIORI"):
return False
if not self._IsKeyword("PEI") and not self._IsKeyword("DXE"):
raise Warning.Expected("Apriori file type", self.FileName, self.CurrentLineNumber)
AprType = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
AprSectionObj = AprioriSection()
AprSectionObj.AprioriType = AprType
self._GetDefineStatements(AprSectionObj)
while True:
IsInf = self._GetInfStatement(AprSectionObj)
IsFile = self._GetFileStatement(AprSectionObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvObj.AprioriSectionList.append(AprSectionObj)
return True
def _ParseInfStatement(self):
if not self._IsKeyword("INF"):
return None
ffsInf = FfsInfStatement()
self._GetInfOptions(ffsInf)
if not self._GetNextToken():
raise Warning.Expected("INF file path", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self._Token
if not ffsInf.InfFileName.endswith('.inf'):
raise Warning.Expected(".inf file path", self.FileName, self.CurrentLineNumber)
ffsInf.CurrentLineNum = self.CurrentLineNumber
ffsInf.CurrentLineContent = self._CurrentLine()
        # Replace $(SPACE) with a real space character
ffsInf.InfFileName = ffsInf.InfFileName.replace('$(SPACE)', ' ')
if ffsInf.InfFileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#do case sensitive check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
NewFileName = ffsInf.InfFileName
if ffsInf.OverrideGuid:
NewFileName = ProcessDuplicatedInf(PathClass(ffsInf.InfFileName,GenFdsGlobalVariable.WorkSpaceDir), ffsInf.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir).Path
if not NewFileName in self.Profile.InfList:
self.Profile.InfList.append(NewFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if ffsInf.UseArch:
if ffsInf.UseArch not in self.Profile.InfDict:
self.Profile.InfDict[ffsInf.UseArch] = [ffsInf.InfFileName]
else:
self.Profile.InfDict[ffsInf.UseArch].append(ffsInf.InfFileName)
else:
self.Profile.InfDict['ArchTBD'].append(ffsInf.InfFileName)
if self._IsToken(TAB_VALUE_SPLIT):
if self._IsKeyword('RELOCS_STRIPPED'):
ffsInf.KeepReloc = False
elif self._IsKeyword('RELOCS_RETAINED'):
ffsInf.KeepReloc = True
else:
raise Warning("Unknown reloc strip flag '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
return ffsInf
## _GetInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def _GetInfStatement(self, Obj, ForCapsule=False):
ffsInf = self._ParseInfStatement()
if not ffsInf:
return False
if ForCapsule:
myCapsuleFfs = CapsuleFfs()
myCapsuleFfs.Ffs = ffsInf
Obj.CapsuleDataList.append(myCapsuleFfs)
else:
Obj.FfsList.append(ffsInf)
return True
## _GetInfOptions() method
#
# Get options for INF
#
# @param self The object pointer
# @param FfsInfObj for whom option is got
#
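    # Illustrative INF options consumed here, in the order the parser checks them
    # (hypothetical values):
    #   INF FILE_GUID = 12345678-ABCD-EF01-2345-6789ABCDEF01 RuleOverride = HypotheticalRule USE = IA32 HypotheticalPkg/Driver/Driver.inf
    #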
def _GetInfOptions(self, FfsInfObj):
if self._IsKeyword("FILE_GUID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
raise Warning.Expected("GUID value", self.FileName, self.CurrentLineNumber)
FfsInfObj.OverrideGuid = self._Token
if self._IsKeyword("RuleOverride"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Rule name", self.FileName, self.CurrentLineNumber)
FfsInfObj.Rule = self._Token
if self._IsKeyword("VERSION"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Version", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
FfsInfObj.Version = self._Token
if self._IsKeyword(BINARY_FILE_TYPE_UI):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("UI name", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
FfsInfObj.Ui = self._Token
if self._IsKeyword("USE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("ARCH name", self.FileName, self.CurrentLineNumber)
FfsInfObj.UseArch = self._Token
if self._GetNextToken():
p = compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\))')
if p.match(self._Token) and p.match(self._Token).span()[1] == len(self._Token):
FfsInfObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
return
else:
self._UndoToken()
return
while self._GetNextToken():
if not p.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
FfsInfObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
## _GetFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
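    # Illustrative FILE statements consumed here (hypothetical GUIDs/paths):
    #   FILE FV_IMAGE = 12345678-ABCD-EF01-2345-6789ABCDEF01 {
    #     SECTION FV_IMAGE = HYPOTHETICALFV
    #   }
    #   FILE RAW = PCD(gHypotheticalTokenSpaceGuid.PcdFileGuid) {
    #     HypotheticalPkg/Binary/Hypothetical.bin
    #   }
    #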
def _GetFileStatement(self, Obj, ForCapsule = False):
if not self._IsKeyword("FILE"):
return False
if not self._GetNextWord():
raise Warning.Expected("FFS type", self.FileName, self.CurrentLineNumber)
if ForCapsule and self._Token == 'DATA':
self._UndoToken()
self._UndoToken()
return False
FfsFileObj = FileStatement()
FfsFileObj.FvFileType = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
if not self._GetNextWord():
raise Warning.Expected("File GUID", self.FileName, self.CurrentLineNumber)
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
FfsFileObj.NameGuid = self._Token
self._GetFilePart(FfsFileObj)
if ForCapsule:
capsuleFfs = CapsuleFfs()
capsuleFfs.Ffs = FfsFileObj
Obj.CapsuleDataList.append(capsuleFfs)
else:
Obj.FfsList.append(FfsFileObj)
return True
## _FileCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a file type.
#
# @param FileType The file type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
@staticmethod
def _FileCouldHaveRelocFlag (FileType):
if FileType in {SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, SUP_MODULE_MM_CORE_STANDALONE, 'PEI_DXE_COMBO'}:
return True
else:
return False
## _SectionCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a section type.
#
# @param SectionType The section type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
@staticmethod
def _SectionCouldHaveRelocFlag (SectionType):
if SectionType in {BINARY_FILE_TYPE_TE, BINARY_FILE_TYPE_PE32}:
return True
else:
return False
## _GetFilePart() method
#
# Get components for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom component is got
#
def _GetFilePart(self, FfsFileObj):
self._GetFileOpts(FfsFileObj)
if not self._IsToken("{"):
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(FfsFileObj.FvFileType):
if self._Token == 'RELOCS_STRIPPED':
FfsFileObj.KeepReloc = False
else:
FfsFileObj.KeepReloc = True
else:
                    raise Warning("File type %s could not have reloc strip flag at line %d" % (FfsFileObj.FvFileType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name or section data", self.FileName, self.CurrentLineNumber)
if self._Token == BINARY_FILE_TYPE_FV:
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
FfsFileObj.FvName = self._Token
elif self._Token == "FD":
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FD name", self.FileName, self.CurrentLineNumber)
FfsFileObj.FdName = self._Token
elif self._Token in {TAB_DEFINE, "APRIORI", "SECTION"}:
self._UndoToken()
self._GetSectionData(FfsFileObj)
elif hasattr(FfsFileObj, 'FvFileType') and FfsFileObj.FvFileType == 'RAW':
self._UndoToken()
self._GetRAWData(FfsFileObj)
else:
FfsFileObj.CurrentLineNum = self.CurrentLineNumber
FfsFileObj.CurrentLineContent = self._CurrentLine()
FfsFileObj.FileName = self._Token.replace('$(SPACE)', ' ')
self._VerifyFile(FfsFileObj.FileName)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
## _GetRAWData() method
#
# Get RAW data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
#
def _GetRAWData(self, FfsFileObj):
FfsFileObj.FileName = []
FfsFileObj.SubAlignment = []
while True:
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
                # For FFS files, "Auto" alignment is the default and is treated the same as ""
if not self._Token == "Auto":
AlignValue = self._Token
if not self._GetNextToken():
raise Warning.Expected("Filename value", self.FileName, self.CurrentLineNumber)
FileName = self._Token.replace('$(SPACE)', ' ')
if FileName == T_CHAR_BRACE_R:
self._UndoToken()
raise Warning.Expected("Filename value", self.FileName, self.CurrentLineNumber)
self._VerifyFile(FileName)
File = PathClass(NormPath(FileName), GenFdsGlobalVariable.WorkSpaceDir)
FfsFileObj.FileName.append(File.Path)
FfsFileObj.SubAlignment.append(AlignValue)
if self._IsToken(T_CHAR_BRACE_R):
self._UndoToken()
break
if len(FfsFileObj.SubAlignment) == 1:
FfsFileObj.SubAlignment = FfsFileObj.SubAlignment[0]
if len(FfsFileObj.FileName) == 1:
FfsFileObj.FileName = FfsFileObj.FileName[0]
## _GetFileOpts() method
#
# Get options for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom options is got
#
def _GetFileOpts(self, FfsFileObj):
if self._GetNextToken():
if TokenFindPattern.match(self._Token):
FfsFileObj.KeyStringList.append(self._Token)
if self._IsToken(TAB_COMMA_SPLIT):
while self._GetNextToken():
if not TokenFindPattern.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
FfsFileObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
else:
self._UndoToken()
if self._IsKeyword("FIXED", True):
FfsFileObj.Fixed = True
if self._IsKeyword("CHECKSUM", True):
FfsFileObj.CheckSum = True
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
            # For FFS files, "Auto" alignment is the default and is treated the same as ""
if not self._Token == "Auto":
FfsFileObj.Alignment = self._Token
## _GetAlignment() method
#
# Return the alignment value
#
# @param self The object pointer
# @retval True Successfully find alignment
# @retval False Not able to find alignment
#
def _GetAlignment(self):
if self._IsKeyword("Align", True):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("alignment value", self.FileName, self.CurrentLineNumber)
return True
return False
## _GetSectionData() method
#
# Get section data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
#
def _GetSectionData(self, FfsFileObj):
self._GetDefineStatements(FfsFileObj)
while True:
IsLeafSection = self._GetLeafSection(FfsFileObj)
IsEncapSection = self._GetEncapsulationSec(FfsFileObj)
if not IsLeafSection and not IsEncapSection:
break
## _GetLeafSection() method
#
# Get leaf section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
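    # Illustrative leaf SECTION statements consumed here (hypothetical paths/strings):
    #   SECTION PE32 = HypotheticalPkg/Driver/Driver.efi
    #   SECTION UI = "Hypothetical Driver"
    #   SECTION BUILD_NUM = 1 VERSION = "1.0"
    #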
def _GetLeafSection(self, Obj):
OldPos = self.GetFileBufferPos()
if not self._IsKeyword("SECTION"):
if len(Obj.SectionList) == 0:
raise Warning.Expected("SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
AlignValue = self._Token
BuildNum = None
if self._IsKeyword("BUILD_NUM"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number value", self.FileName, self.CurrentLineNumber)
BuildNum = self._Token
if self._IsKeyword("VERSION"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("version", self.FileName, self.CurrentLineNumber)
VerSectionObj = VerSection()
VerSectionObj.Alignment = AlignValue
VerSectionObj.BuildNum = BuildNum
if self._GetStringData():
VerSectionObj.StringData = self._Token
else:
VerSectionObj.FileName = self._Token
Obj.SectionList.append(VerSectionObj)
elif self._IsKeyword(BINARY_FILE_TYPE_UI):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("UI", self.FileName, self.CurrentLineNumber)
UiSectionObj = UiSection()
UiSectionObj.Alignment = AlignValue
if self._GetStringData():
UiSectionObj.StringData = self._Token
else:
UiSectionObj.FileName = self._Token
Obj.SectionList.append(UiSectionObj)
elif self._IsKeyword("FV_IMAGE"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name or FV file path", self.FileName, self.CurrentLineNumber)
FvName = self._Token
FvObj = None
if self._IsToken("{"):
FvObj = FV()
FvObj.UiFvName = FvName.upper()
self._GetDefineStatements(FvObj)
self._GetBlockStatement(FvObj)
self._GetSetStatements(FvObj)
self._GetFvAlignment(FvObj)
self._GetFvAttributes(FvObj)
while True:
IsInf = self._GetInfStatement(FvObj)
IsFile = self._GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvImageSectionObj = FvImageSection()
FvImageSectionObj.Alignment = AlignValue
if FvObj is not None:
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
FvImageSectionObj.FvName = FvName.upper()
FvImageSectionObj.FvFileName = FvName
Obj.SectionList.append(FvImageSectionObj)
elif self._IsKeyword("PEI_DEPEX_EXP") or self._IsKeyword("DXE_DEPEX_EXP") or self._IsKeyword("SMM_DEPEX_EXP"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
DepexSectionObj = DepexSection()
DepexSectionObj.Alignment = AlignValue
DepexSectionObj.DepexType = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._SkipToToken(T_CHAR_BRACE_R):
raise Warning.Expected("Depex expression ending '}'", self.FileName, self.CurrentLineNumber)
DepexSectionObj.Expression = self._SkippedChars.rstrip(T_CHAR_BRACE_R)
Obj.SectionList.append(DepexSectionObj)
else:
if not self._GetNextWord():
raise Warning.Expected("section type", self.FileName, self.CurrentLineNumber)
# Encapsulation section appear, UndoToken and return
if self._Token == "COMPRESS" or self._Token == "GUIDED":
self.SetFileBufferPos(OldPos)
return False
if self._Token not in {"COMPAT16", BINARY_FILE_TYPE_PE32, BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,\
BINARY_FILE_TYPE_UI, "VERSION", BINARY_FILE_TYPE_PEI_DEPEX, "SUBTYPE_GUID", BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning("Unknown section type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
            if AlignValue == 'Auto' and (not self._Token == BINARY_FILE_TYPE_PE32) and (not self._Token == BINARY_FILE_TYPE_TE):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
# DataSection
DataSectionObj = DataSection()
DataSectionObj.Alignment = AlignValue
DataSectionObj.SecType = self._Token
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(Obj.FvFileType) and self._SectionCouldHaveRelocFlag(DataSectionObj.SecType):
if self._Token == 'RELOCS_STRIPPED':
DataSectionObj.KeepReloc = False
else:
DataSectionObj.KeepReloc = True
else:
                    raise Warning("File type %s, section type %s, could not have reloc strip flag at line %d" % (Obj.FvFileType, DataSectionObj.SecType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if self._IsToken(TAB_EQUAL_SPLIT):
if not self._GetNextToken():
raise Warning.Expected("section file path", self.FileName, self.CurrentLineNumber)
DataSectionObj.SectFileName = self._Token
self._VerifyFile(DataSectionObj.SectFileName)
else:
if not self._GetCglSection(DataSectionObj):
return False
Obj.SectionList.append(DataSectionObj)
return True
## _VerifyFile
#
# Check if file exists or not:
# If current phase if GenFds, the file must exist;
# If current phase is AutoGen and the file is not in $(OUTPUT_DIRECTORY), the file must exist
# @param FileName: File path to be verified.
#
def _VerifyFile(self, FileName):
if FileName.replace(TAB_WORKSPACE, '').find('$') != -1:
return
if not GlobalData.gAutoGenPhase or not self._GetMacroValue(TAB_DSC_DEFINES_OUTPUT_DIRECTORY) in FileName:
ErrorCode, ErrorInfo = PathClass(NormPath(FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
## _GetCglSection() method
#
# Get compressed or GUIDed section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @param AlignValue alignment value for complex section
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetCglSection(self, Obj, AlignValue = None):
if self._IsKeyword("COMPRESS"):
type = "PI_STD"
if self._IsKeyword("PI_STD") or self._IsKeyword("PI_NONE"):
type = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
CompressSectionObj = CompressSection()
CompressSectionObj.Alignment = AlignValue
CompressSectionObj.CompType = type
# Recursive sections...
while True:
IsLeafSection = self._GetLeafSection(CompressSectionObj)
IsEncapSection = self._GetEncapsulationSec(CompressSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(CompressSectionObj)
return True
elif self._IsKeyword("GUIDED"):
GuidValue = None
if self._GetNextGuid():
GuidValue = self._Token
AttribDict = self._GetGuidAttrib()
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
GuidSectionObj = GuidSection()
GuidSectionObj.Alignment = AlignValue
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
# Recursive sections...
while True:
IsLeafSection = self._GetLeafSection(GuidSectionObj)
IsEncapSection = self._GetEncapsulationSec(GuidSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(GuidSectionObj)
return True
return False
    ## _GetGuidAttrib() method
#
# Get attributes for GUID section
#
# @param self The object pointer
# @retval AttribDict Dictionary of key-value pair of section attributes
#
def _GetGuidAttrib(self):
AttribDict = {}
AttribDict["PROCESSING_REQUIRED"] = "NONE"
AttribDict["AUTH_STATUS_VALID"] = "NONE"
AttribDict["EXTRA_HEADER_SIZE"] = -1
while self._IsKeyword("PROCESSING_REQUIRED") or self._IsKeyword("AUTH_STATUS_VALID") \
or self._IsKeyword("EXTRA_HEADER_SIZE"):
AttribKey = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("TRUE(1)/FALSE(0)/Number", self.FileName, self.CurrentLineNumber)
elif AttribKey == "EXTRA_HEADER_SIZE":
Base = 10
if self._Token[0:2].upper() == "0X":
Base = 16
try:
AttribDict[AttribKey] = int(self._Token, Base)
continue
except ValueError:
raise Warning.Expected("Number", self.FileName, self.CurrentLineNumber)
elif self._Token.upper() not in {"TRUE", "FALSE", "1", "0"}:
raise Warning.Expected("TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
AttribDict[AttribKey] = self._Token
return AttribDict
## _GetEncapsulationSec() method
#
# Get encapsulation section for FILE
#
# @param self The object pointer
# @param FfsFile for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetEncapsulationSec(self, FfsFileObj):
OldPos = self.GetFileBufferPos()
if not self._IsKeyword("SECTION"):
if len(FfsFileObj.SectionList) == 0:
raise Warning.Expected("SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENT_NOAUTO:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
AlignValue = self._Token
if not self._GetCglSection(FfsFileObj, AlignValue):
self.SetFileBufferPos(OldPos)
return False
else:
return True
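    ## _GetFmp() method
    #
    #   Parse an [FmpPayload.<UiName>] section and store the result in
    #   self.Profile.FmpPayloadDict. Recognized keys are IMAGE_HEADER_INIT_VERSION,
    #   IMAGE_TYPE_ID (required), IMAGE_INDEX, HARDWARE_INSTANCE, CERTIFICATE_GUID
    #   and MONOTONIC_COUNT (the last two must be used together), followed by the
    #   image file and optional vendor code file statements.
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully find a FMP payload section
    #   @retval False       Not able to find a FMP payload section
    #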
def _GetFmp(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FMPPAYLOAD."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
self._SkipToToken("[FMPPAYLOAD.", True)
FmpUiName = self._GetUiName().upper()
if FmpUiName in self.Profile.FmpPayloadDict:
raise Warning("Duplicated FMP UI name found: %s" % FmpUiName, self.FileName, self.CurrentLineNumber)
FmpData = CapsulePayload()
FmpData.UiName = FmpUiName
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning("The FMP payload section is empty!", self.FileName, self.CurrentLineNumber)
FmpKeyList = ['IMAGE_HEADER_INIT_VERSION', 'IMAGE_TYPE_ID', 'IMAGE_INDEX', 'HARDWARE_INSTANCE', 'CERTIFICATE_GUID', 'MONOTONIC_COUNT']
while self._Token in FmpKeyList:
Name = self._Token
FmpKeyList.remove(Name)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if Name == 'IMAGE_TYPE_ID':
if not self._GetNextGuid():
raise Warning.Expected("GUID value for IMAGE_TYPE_ID.", self.FileName, self.CurrentLineNumber)
FmpData.ImageTypeId = self._Token
elif Name == 'CERTIFICATE_GUID':
if not self._GetNextGuid():
raise Warning.Expected("GUID value for CERTIFICATE_GUID.", self.FileName, self.CurrentLineNumber)
FmpData.Certificate_Guid = self._Token
if UUID(FmpData.Certificate_Guid) != EFI_CERT_TYPE_RSA2048_SHA256_GUID and UUID(FmpData.Certificate_Guid) != EFI_CERT_TYPE_PKCS7_GUID:
raise Warning("Only support EFI_CERT_TYPE_RSA2048_SHA256_GUID or EFI_CERT_TYPE_PKCS7_GUID for CERTIFICATE_GUID.", self.FileName, self.CurrentLineNumber)
else:
if not self._GetNextToken():
raise Warning.Expected("value of %s" % Name, self.FileName, self.CurrentLineNumber)
Value = self._Token
if Name == 'IMAGE_HEADER_INIT_VERSION':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.Version = Value
elif Name == 'IMAGE_INDEX':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.ImageIndex = Value
elif Name == 'HARDWARE_INSTANCE':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.HardwareInstance = Value
elif Name == 'MONOTONIC_COUNT':
if FdfParser._Verify(Name, Value, 'UINT64'):
FmpData.MonotonicCount = Value
if FmpData.MonotonicCount.upper().startswith('0X'):
FmpData.MonotonicCount = int(FmpData.MonotonicCount, 16)
else:
FmpData.MonotonicCount = int(FmpData.MonotonicCount)
if not self._GetNextToken():
break
else:
self._UndoToken()
if (FmpData.MonotonicCount and not FmpData.Certificate_Guid) or (not FmpData.MonotonicCount and FmpData.Certificate_Guid):
            EdkLogger.error("FdfParser", FORMAT_INVALID, "CERTIFICATE_GUID and MONOTONIC_COUNT must be used as a pair.")
        # Only IMAGE_TYPE_ID is a required item
        if FmpKeyList and 'IMAGE_TYPE_ID' in FmpKeyList:
            raise Warning("Missing 'IMAGE_TYPE_ID' in FMP payload section.", self.FileName, self.CurrentLineNumber)
# get the Image file and Vendor code file
self._GetFMPCapsuleData(FmpData)
if not FmpData.ImageFile:
raise Warning("Missing image file in FMP payload section.", self.FileName, self.CurrentLineNumber)
# check whether more than one Vendor code file
if len(FmpData.VendorCodeFile) > 1:
raise Warning("Vendor code file max of 1 per FMP payload section.", self.FileName, self.CurrentLineNumber)
self.Profile.FmpPayloadDict[FmpUiName] = FmpData
return True
## _GetCapsule() method
#
# Get capsule section contents and store its data into capsule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a capsule
# @retval False Not able to find a capsule
#
def _GetCapsule(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[CAPSULE."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[CAPSULE.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[Capsule.]", self.FileName, self.CurrentLineNumber)
CapsuleObj = Capsule()
CapsuleName = self._GetUiName()
if not CapsuleName:
raise Warning.Expected("capsule name", self.FileName, self.CurrentLineNumber)
CapsuleObj.UiCapsuleName = CapsuleName.upper()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
if self._IsKeyword("CREATE_FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("file name", self.FileName, self.CurrentLineNumber)
CapsuleObj.CreateFile = self._Token
self._GetCapsuleStatements(CapsuleObj)
self.Profile.CapsuleDict[CapsuleObj.UiCapsuleName] = CapsuleObj
return True
## _GetCapsuleStatements() method
#
# Get statements for capsule
#
# @param self The object pointer
# @param Obj for whom statements are got
#
def _GetCapsuleStatements(self, Obj):
self._GetCapsuleTokens(Obj)
self._GetDefineStatements(Obj)
self._GetSetStatements(Obj)
self._GetCapsuleData(Obj)
## _GetCapsuleTokens() method
#
# Get token statements for capsule
#
# @param self The object pointer
# @param Obj for whom token statements are got
#
def _GetCapsuleTokens(self, Obj):
if not self._GetNextToken():
return False
while self._Token in {"CAPSULE_GUID", "CAPSULE_HEADER_SIZE", "CAPSULE_FLAGS", "OEM_CAPSULE_FLAGS", "CAPSULE_HEADER_INIT_VERSION"}:
Name = self._Token.strip()
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
if Name == 'CAPSULE_FLAGS':
if not self._Token in {"PersistAcrossReset", "PopulateSystemTable", "InitiateReset"}:
raise Warning.Expected("PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
Value = self._Token.strip()
while self._IsToken(TAB_COMMA_SPLIT):
Value += TAB_COMMA_SPLIT
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
if not self._Token in {"PersistAcrossReset", "PopulateSystemTable", "InitiateReset"}:
raise Warning.Expected("PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
Value += self._Token.strip()
elif Name == 'OEM_CAPSULE_FLAGS':
Value = self._Token.strip()
if not Value.upper().startswith('0X'):
raise Warning.Expected("hex value starting with 0x", self.FileName, self.CurrentLineNumber)
try:
Value = int(Value, 0)
except ValueError:
raise Warning.Expected("hex string failed to convert to value", self.FileName, self.CurrentLineNumber)
if not 0x0000 <= Value <= 0xFFFF:
raise Warning.Expected("hex value between 0x0000 and 0xFFFF", self.FileName, self.CurrentLineNumber)
Value = self._Token.strip()
else:
Value = self._Token.strip()
Obj.TokensDict[Name] = Value
if not self._GetNextToken():
return False
self._UndoToken()
## _GetCapsuleData() method
#
# Get capsule data for capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def _GetCapsuleData(self, Obj):
while True:
IsInf = self._GetInfStatement(Obj, True)
IsFile = self._GetFileStatement(Obj, True)
IsFv = self._GetFvStatement(Obj)
IsFd = self._GetFdStatement(Obj)
IsAnyFile = self._GetAnyFileStatement(Obj)
IsAfile = self._GetAfileStatement(Obj)
IsFmp = self._GetFmpStatement(Obj)
if not (IsInf or IsFile or IsFv or IsFd or IsAnyFile or IsAfile or IsFmp):
break
## _GetFMPCapsuleData() method
#
# Get capsule data for FMP capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def _GetFMPCapsuleData(self, Obj):
while True:
IsFv = self._GetFvStatement(Obj, True)
IsFd = self._GetFdStatement(Obj, True)
IsAnyFile = self._GetAnyFileStatement(Obj, True)
if not (IsFv or IsFd or IsAnyFile):
break
## _GetFvStatement() method
#
# Get FV for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FV is got
# @retval True Successfully find a FV statement
# @retval False Not able to find a FV statement
#
def _GetFvStatement(self, CapsuleObj, FMPCapsule = False):
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in self.Profile.FvDict:
raise Warning("FV name does not exist", self.FileName, self.CurrentLineNumber)
myCapsuleFv = CapsuleFv()
myCapsuleFv.FvName = self._Token
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleFv)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleFv)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleFv)
return True
## _GetFdStatement() method
#
# Get FD for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FD is got
# @retval True Successfully find a FD statement
# @retval False Not able to find a FD statement
#
def _GetFdStatement(self, CapsuleObj, FMPCapsule = False):
if not self._IsKeyword("FD"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FD name", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in self.Profile.FdDict:
raise Warning("FD name does not exist", self.FileName, self.CurrentLineNumber)
myCapsuleFd = CapsuleFd()
myCapsuleFd.FdName = self._Token
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleFd)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleFd)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleFd)
return True
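    ## _GetFmpStatement() method
    #
    #   Get FMP payload statement ("FMP_PAYLOAD = <name>" or "FMP PAYLOAD = <name>")
    #   for capsule; the named payload must already exist in Profile.FmpPayloadDict.
    #
    #   @param  self        The object pointer
    #   @param  CapsuleObj  for whom FMP payload is got
    #   @retval True        Successfully find a FMP payload statement
    #   @retval False       Not able to find a FMP payload statement
    #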
def _GetFmpStatement(self, CapsuleObj):
if not self._IsKeyword("FMP_PAYLOAD"):
if not self._IsKeyword("FMP"):
return False
if not self._IsKeyword("PAYLOAD"):
self._UndoToken()
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("payload name after FMP_PAYLOAD =", self.FileName, self.CurrentLineNumber)
Payload = self._Token.upper()
if Payload not in self.Profile.FmpPayloadDict:
raise Warning("This FMP Payload does not exist: %s" % self._Token, self.FileName, self.CurrentLineNumber)
CapsuleObj.FmpPayloadList.append(self.Profile.FmpPayloadDict[Payload])
return True
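    ## _ParseRawFileStatement() method
    #
    #   Parse a raw file statement of the form "FILE DATA = <file path>".
    #   The referenced file is verified and an absolute path is returned,
    #   or None if no such statement is present at the current position.
    #
    #   @param  self        The object pointer
    #   @retval AnyFileName Absolute path of the raw file
    #   @retval None        Not a FILE DATA statement
    #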
def _ParseRawFileStatement(self):
if not self._IsKeyword("FILE"):
return None
if not self._IsKeyword("DATA"):
self._UndoToken()
return None
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
AnyFileName = self._Token
self._VerifyFile(AnyFileName)
if not os.path.isabs(AnyFileName):
AnyFileName = mws.join(GenFdsGlobalVariable.WorkSpaceDir, AnyFileName)
return AnyFileName
## _GetAnyFileStatement() method
#
# Get AnyFile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom AnyFile is got
# @retval True Successfully find a Anyfile statement
# @retval False Not able to find a AnyFile statement
#
def _GetAnyFileStatement(self, CapsuleObj, FMPCapsule = False):
AnyFileName = self._ParseRawFileStatement()
if not AnyFileName:
return False
myCapsuleAnyFile = CapsuleAnyFile()
myCapsuleAnyFile.FileName = AnyFileName
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleAnyFile)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleAnyFile)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleAnyFile)
return True
## _GetAfileStatement() method
#
# Get Afile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom Afile is got
# @retval True Successfully find a Afile statement
# @retval False Not able to find a Afile statement
#
def _GetAfileStatement(self, CapsuleObj):
if not self._IsKeyword("APPEND"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Afile name", self.FileName, self.CurrentLineNumber)
AfileName = self._Token
AfileBaseName = os.path.basename(AfileName)
if os.path.splitext(AfileBaseName)[1] not in {".bin", ".BIN", ".Bin", ".dat", ".DAT", ".Dat", ".data", ".DATA", ".Data"}:
            raise Warning('invalid binary file type, should be one of "bin", "BIN", "Bin", "dat", "DAT", "Dat", "data", "DATA", "Data"', \
                          self.FileName, self.CurrentLineNumber)
if not os.path.isabs(AfileName):
AfileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(AfileName)
self._VerifyFile(AfileName)
else:
if not os.path.exists(AfileName):
raise Warning('%s does not exist' % AfileName, self.FileName, self.CurrentLineNumber)
else:
pass
myCapsuleAfile = CapsuleAfile()
myCapsuleAfile.FileName = AfileName
CapsuleObj.CapsuleDataList.append(myCapsuleAfile)
return True
## _GetRule() method
#
# Get Rule section contents and store its data into rule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a Rule
# @retval False Not able to find a Rule
#
def _GetRule(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[RULE."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[Rule.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[Rule.]", self.FileName, self.CurrentLineNumber)
if not self._SkipToToken(TAB_SPLIT):
raise Warning.Expected("'.'", self.FileName, self.CurrentLineNumber)
Arch = self._SkippedChars.rstrip(TAB_SPLIT)
if Arch.upper() not in ARCH_SET_FULL:
raise Warning("Unknown Arch '%s'" % Arch, self.FileName, self.CurrentLineNumber)
ModuleType = self._GetModuleType()
TemplateName = ""
if self._IsToken(TAB_SPLIT):
if not self._GetNextWord():
raise Warning.Expected("template name", self.FileName, self.CurrentLineNumber)
TemplateName = self._Token
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
RuleObj = self._GetRuleFileStatements()
RuleObj.Arch = Arch.upper()
RuleObj.ModuleType = ModuleType
RuleObj.TemplateName = TemplateName
if TemplateName == '':
self.Profile.RuleDict['RULE' + \
TAB_SPLIT + \
Arch.upper() + \
TAB_SPLIT + \
ModuleType.upper() ] = RuleObj
else:
self.Profile.RuleDict['RULE' + \
TAB_SPLIT + \
Arch.upper() + \
TAB_SPLIT + \
ModuleType.upper() + \
TAB_SPLIT + \
TemplateName.upper() ] = RuleObj
return True
## _GetModuleType() method
#
# Return the module type
#
# @param self The object pointer
# @retval string module type
#
def _GetModuleType(self):
if not self._GetNextWord():
raise Warning.Expected("Module type", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {
SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM,
SUP_MODULE_DXE_CORE, SUP_MODULE_DXE_DRIVER,
SUP_MODULE_DXE_SAL_DRIVER, SUP_MODULE_DXE_SMM_DRIVER,
SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_UEFI_DRIVER,
SUP_MODULE_UEFI_APPLICATION, SUP_MODULE_USER_DEFINED,
TAB_DEFAULT, SUP_MODULE_BASE,
EDK_COMPONENT_TYPE_SECURITY_CORE,
EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER,
EDK_COMPONENT_TYPE_PIC_PEIM,
EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, "PE32_PEIM",
EDK_COMPONENT_TYPE_BS_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER,
EDK_COMPONENT_TYPE_SAL_RT_DRIVER,
EDK_COMPONENT_TYPE_APPLICATION, "ACPITABLE",
SUP_MODULE_SMM_CORE, SUP_MODULE_MM_STANDALONE,
SUP_MODULE_MM_CORE_STANDALONE}:
raise Warning("Unknown Module type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
return self._Token
## _GetFileExtension() method
#
# Return the file extension
#
# @param self The object pointer
# @retval string file name extension
#
def _GetFileExtension(self):
if not self._IsToken(TAB_SPLIT):
raise Warning.Expected("'.'", self.FileName, self.CurrentLineNumber)
Ext = ""
if self._GetNextToken():
if FileExtensionPattern.match(self._Token):
Ext = self._Token
return TAB_SPLIT + Ext
else:
raise Warning("Unknown file extension '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
else:
raise Warning.Expected("file extension", self.FileName, self.CurrentLineNumber)
## _GetRuleFileStatement() method
#
# Get rule contents
#
# @param self The object pointer
# @retval Rule Rule object
#
def _GetRuleFileStatements(self):
if not self._IsKeyword("FILE"):
raise Warning.Expected("FILE", self.FileName, self.CurrentLineNumber)
if not self._GetNextWord():
raise Warning.Expected("FFS type", self.FileName, self.CurrentLineNumber)
Type = self._Token.strip().upper()
if Type not in {"RAW", "FREEFORM", SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM,
"PEI_DXE_COMBO", "DRIVER", SUP_MODULE_DXE_CORE, EDK_COMPONENT_TYPE_APPLICATION,
"FV_IMAGE", "SMM", SUP_MODULE_SMM_CORE, SUP_MODULE_MM_STANDALONE,
SUP_MODULE_MM_CORE_STANDALONE}:
            raise Warning("Unknown FFS type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsKeyword("$(NAMED_GUID)"):
if not self._GetNextWord():
raise Warning.Expected("$(NAMED_GUID)", self.FileName, self.CurrentLineNumber)
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
NameGuid = self._Token
KeepReloc = None
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(Type):
if self._Token == 'RELOCS_STRIPPED':
KeepReloc = False
else:
KeepReloc = True
else:
                raise Warning("File type %s could not have reloc strip flag (line %d)" % (Type, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
KeyStringList = []
if self._GetNextToken():
if TokenFindPattern.match(self._Token):
KeyStringList.append(self._Token)
if self._IsToken(TAB_COMMA_SPLIT):
while self._GetNextToken():
if not TokenFindPattern.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
else:
self._UndoToken()
Fixed = False
if self._IsKeyword("Fixed", True):
Fixed = True
CheckSum = False
if self._IsKeyword("CheckSum", True):
CheckSum = True
AlignValue = ""
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
            # For FFS, Auto is the default option, the same as ""
if not self._Token == "Auto":
AlignValue = self._Token
if self._IsToken("{"):
# Complex file rule expected
NewRule = RuleComplexFile()
NewRule.FvFileType = Type
NewRule.NameGuid = NameGuid
NewRule.Alignment = AlignValue
NewRule.CheckSum = CheckSum
NewRule.Fixed = Fixed
NewRule.KeyStringList = KeyStringList
if KeepReloc is not None:
NewRule.KeepReloc = KeepReloc
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(NewRule)
IsLeaf = self._GetEfiSection(NewRule)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
return NewRule
else:
# Simple file rule expected
if not self._GetNextWord():
raise Warning.Expected("leaf section type", self.FileName, self.CurrentLineNumber)
SectionName = self._Token
if SectionName not in {
"COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE",
"RAW",BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI,
BINARY_FILE_TYPE_PEI_DEPEX, "VERSION", "SUBTYPE_GUID",
BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning("Unknown leaf section name '%s'" % SectionName, self.FileName, self.CurrentLineNumber)
if self._IsKeyword("Fixed", True):
Fixed = True
if self._IsKeyword("CheckSum", True):
CheckSum = True
SectAlignment = ""
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token == 'Auto' and (not SectionName == BINARY_FILE_TYPE_PE32) and (not SectionName == BINARY_FILE_TYPE_TE):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
SectAlignment = self._Token
Ext = None
if self._IsToken(TAB_VALUE_SPLIT):
Ext = self._GetFileExtension()
elif not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
NewRule = RuleSimpleFile()
NewRule.SectionType = SectionName
NewRule.FvFileType = Type
NewRule.NameGuid = NameGuid
NewRule.Alignment = AlignValue
NewRule.SectAlignment = SectAlignment
NewRule.CheckSum = CheckSum
NewRule.Fixed = Fixed
NewRule.KeyStringList = KeyStringList
if KeepReloc is not None:
NewRule.KeepReloc = KeepReloc
NewRule.FileExtension = Ext
NewRule.FileName = self._Token
return NewRule
## _GetEfiSection() method
#
# Get section list for Rule
#
# @param self The object pointer
# @param Obj for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetEfiSection(self, Obj):
OldPos = self.GetFileBufferPos()
if not self._GetNextWord():
return False
SectionName = self._Token
if SectionName not in {
"COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE",
"RAW",BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI,
BINARY_FILE_TYPE_PEI_DEPEX, "VERSION", "SUBTYPE_GUID",
BINARY_FILE_TYPE_SMM_DEPEX, BINARY_FILE_TYPE_GUID}:
self._UndoToken()
return False
if SectionName == "FV_IMAGE":
FvImageSectionObj = FvImageSection()
if self._IsKeyword("FV_IMAGE"):
pass
if self._IsToken("{"):
FvObj = FV()
self._GetDefineStatements(FvObj)
self._GetBlockStatement(FvObj)
self._GetSetStatements(FvObj)
self._GetFvAlignment(FvObj)
self._GetFvAttributes(FvObj)
self._GetAprioriSection(FvObj)
self._GetAprioriSection(FvObj)
while True:
IsInf = self._GetInfStatement(FvObj)
IsFile = self._GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
raise Warning.Expected("'FV'", self.FileName, self.CurrentLineNumber)
FvImageSectionObj.FvFileType = self._Token
if self._GetAlignment():
if self._Token not in ALIGNMENT_NOAUTO:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Alignment = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
FvImageSectionObj.FvFileExtension = self._GetFileExtension()
elif self._GetNextToken():
if self._Token not in {
T_CHAR_BRACE_R, "COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE,
"FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,
BINARY_FILE_TYPE_UI, "VERSION",
BINARY_FILE_TYPE_PEI_DEPEX, BINARY_FILE_TYPE_GUID,
BINARY_FILE_TYPE_SMM_DEPEX}:
FvImageSectionObj.FvFileName = self._Token
else:
self._UndoToken()
else:
raise Warning.Expected("FV file name", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(FvImageSectionObj)
return True
EfiSectionObj = EfiSection()
EfiSectionObj.SectionType = SectionName
if not self._GetNextToken():
raise Warning.Expected("file type", self.FileName, self.CurrentLineNumber)
if self._Token == "STRING":
if not self._RuleSectionCouldHaveString(EfiSectionObj.SectionType):
                raise Warning("%s section could NOT have string data (line %d)" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Quoted String", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
EfiSectionObj.StringData = self._Token
if self._IsKeyword("BUILD_NUM"):
if not self._RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
                    raise Warning("%s section could NOT have BUILD_NUM (line %d)" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self._Token
else:
EfiSectionObj.FileType = self._Token
self._CheckRuleSectionFileType(EfiSectionObj.SectionType, EfiSectionObj.FileType)
if self._IsKeyword("Optional"):
if not self._RuleSectionCouldBeOptional(EfiSectionObj.SectionType):
                    raise Warning("%s section could NOT be optional (line %d)" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
EfiSectionObj.Optional = True
if self._IsKeyword("BUILD_NUM"):
if not self._RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
                        raise Warning("%s section could NOT have BUILD_NUM (line %d)" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self._Token
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token == 'Auto' and (not SectionName == BINARY_FILE_TYPE_PE32) and (not SectionName == BINARY_FILE_TYPE_TE):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
EfiSectionObj.Alignment = self._Token
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._SectionCouldHaveRelocFlag(EfiSectionObj.SectionType):
if self._Token == 'RELOCS_STRIPPED':
EfiSectionObj.KeepReloc = False
else:
EfiSectionObj.KeepReloc = True
if Obj.KeepReloc is not None and Obj.KeepReloc != EfiSectionObj.KeepReloc:
raise Warning("Section type %s has reloc strip flag conflict with Rule" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
else:
raise Warning("Section type %s could not have reloc strip flag" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
if self._IsToken(TAB_VALUE_SPLIT):
EfiSectionObj.FileExtension = self._GetFileExtension()
elif self._GetNextToken():
if self._Token not in {
T_CHAR_BRACE_R, "COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE,
"FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,
BINARY_FILE_TYPE_UI, "VERSION",
BINARY_FILE_TYPE_PEI_DEPEX, BINARY_FILE_TYPE_GUID,
BINARY_FILE_TYPE_SMM_DEPEX}:
if self._Token.startswith('PCD'):
self._UndoToken()
self._GetNextWord()
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
EfiSectionObj.FileName = self._Token
else:
self._UndoToken()
else:
raise Warning.Expected("section file name", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(EfiSectionObj)
return True
## _RuleSectionCouldBeOptional() method
#
# Get whether a section could be optional
#
# @param SectionType The section type to check
# @retval True section could be optional
# @retval False section never optional
#
@staticmethod
def _RuleSectionCouldBeOptional(SectionType):
if SectionType in {BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI, "VERSION", BINARY_FILE_TYPE_PEI_DEPEX, "RAW", BINARY_FILE_TYPE_SMM_DEPEX}:
return True
else:
return False
## _RuleSectionCouldHaveBuildNum() method
#
# Get whether a section could have build number information
#
# @param SectionType The section type to check
# @retval True section could have build number information
# @retval False section never have build number information
#
@staticmethod
def _RuleSectionCouldHaveBuildNum(SectionType):
if SectionType == "VERSION":
return True
else:
return False
## _RuleSectionCouldHaveString() method
#
# Get whether a section could have string
#
# @param SectionType The section type to check
# @retval True section could have string
# @retval False section never have string
#
@staticmethod
def _RuleSectionCouldHaveString(SectionType):
if SectionType in {BINARY_FILE_TYPE_UI, "VERSION"}:
return True
else:
return False
## _CheckRuleSectionFileType() method
#
# Get whether a section matches a file type
#
# @param self The object pointer
# @param SectionType The section type to check
# @param FileType The file type to check
#
def _CheckRuleSectionFileType(self, SectionType, FileType):
WarningString = "Incorrect section file type '%s'"
if SectionType == "COMPAT16":
if FileType not in {"COMPAT16", "SEC_COMPAT16"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PE32:
if FileType not in {BINARY_FILE_TYPE_PE32, "SEC_PE32"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PIC:
            if FileType not in {BINARY_FILE_TYPE_PIC}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_TE:
if FileType not in {BINARY_FILE_TYPE_TE, "SEC_TE"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "RAW":
if FileType not in {BINARY_FILE_TYPE_BIN, "SEC_BIN", "RAW", "ASL", "ACPI"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_DXE_DEPEX or SectionType == BINARY_FILE_TYPE_SMM_DEPEX:
if FileType not in {BINARY_FILE_TYPE_DXE_DEPEX, "SEC_DXE_DEPEX", BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_UI:
if FileType not in {BINARY_FILE_TYPE_UI, "SEC_UI"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "VERSION":
if FileType not in {"VERSION", "SEC_VERSION"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PEI_DEPEX:
if FileType not in {BINARY_FILE_TYPE_PEI_DEPEX, "SEC_PEI_DEPEX"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_GUID:
if FileType not in {BINARY_FILE_TYPE_PE32, "SEC_GUID"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
## _GetRuleEncapsulationSection() method
#
# Get encapsulation section for Rule
#
# @param self The object pointer
# @param theRule for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetRuleEncapsulationSection(self, theRule):
if self._IsKeyword("COMPRESS"):
Type = "PI_STD"
if self._IsKeyword("PI_STD") or self._IsKeyword("PI_NONE"):
Type = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
CompressSectionObj = CompressSection()
CompressSectionObj.CompType = Type
# Recursive sections...
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(CompressSectionObj)
IsLeaf = self._GetEfiSection(CompressSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
theRule.SectionList.append(CompressSectionObj)
return True
elif self._IsKeyword("GUIDED"):
GuidValue = None
if self._GetNextGuid():
GuidValue = self._Token
if self._IsKeyword("$(NAMED_GUID)"):
GuidValue = self._Token
AttribDict = self._GetGuidAttrib()
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
GuidSectionObj = GuidSection()
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
# Efi sections...
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(GuidSectionObj)
IsLeaf = self._GetEfiSection(GuidSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
theRule.SectionList.append(GuidSectionObj)
return True
return False
## _GetOptionRom() method
#
# Get OptionROM section contents and store its data into OptionROM list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a OptionROM
# @retval False Not able to find a OptionROM
#
def _GetOptionRom(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[OPTIONROM."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[OptionRom.", True):
raise Warning("Unknown Keyword '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
OptRomName = self._GetUiName()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
OptRomObj = OPTIONROM(OptRomName)
self.Profile.OptRomDict[OptRomName] = OptRomObj
while True:
isInf = self._GetOptRomInfStatement(OptRomObj)
isFile = self._GetOptRomFileStatement(OptRomObj)
if not isInf and not isFile:
break
return True
## _GetOptRomInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def _GetOptRomInfStatement(self, Obj):
if not self._IsKeyword("INF"):
return False
ffsInf = OptRomInfStatement()
self._GetInfOptions(ffsInf)
if not self._GetNextToken():
raise Warning.Expected("INF file path", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self._Token
if ffsInf.InfFileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
NewFileName = ffsInf.InfFileName
if ffsInf.OverrideGuid:
NewFileName = ProcessDuplicatedInf(PathClass(ffsInf.InfFileName,GenFdsGlobalVariable.WorkSpaceDir), ffsInf.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir).Path
if not NewFileName in self.Profile.InfList:
self.Profile.InfList.append(NewFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if ffsInf.UseArch:
if ffsInf.UseArch not in self.Profile.InfDict:
self.Profile.InfDict[ffsInf.UseArch] = [ffsInf.InfFileName]
else:
self.Profile.InfDict[ffsInf.UseArch].append(ffsInf.InfFileName)
else:
self.Profile.InfDict['ArchTBD'].append(ffsInf.InfFileName)
self._GetOptRomOverrides (ffsInf)
Obj.FfsList.append(ffsInf)
return True
## _GetOptRomOverrides() method
#
# Get overrides for OptROM INF & FILE
#
# @param self The object pointer
# @param FfsInfObj for whom overrides is got
#
def _GetOptRomOverrides(self, Obj):
if self._IsToken('{'):
Overrides = OverrideAttribs()
while True:
if self._IsKeyword("PCI_VENDOR_ID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex vendor id", self.FileName, self.CurrentLineNumber)
Overrides.PciVendorId = self._Token
continue
if self._IsKeyword("PCI_CLASS_CODE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex class code", self.FileName, self.CurrentLineNumber)
Overrides.PciClassCode = self._Token
continue
if self._IsKeyword("PCI_DEVICE_ID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
# Get a list of PCI IDs
Overrides.PciDeviceId = ""
while (self._GetNextHexNumber()):
Overrides.PciDeviceId = "{} {}".format(Overrides.PciDeviceId, self._Token)
if not Overrides.PciDeviceId:
raise Warning.Expected("one or more Hex device ids", self.FileName, self.CurrentLineNumber)
continue
if self._IsKeyword("PCI_REVISION"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex revision", self.FileName, self.CurrentLineNumber)
Overrides.PciRevision = self._Token
continue
if self._IsKeyword("PCI_COMPRESS"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("TRUE/FALSE for compress", self.FileName, self.CurrentLineNumber)
Overrides.NeedCompress = self._Token.upper() == 'TRUE'
continue
if self._IsToken(T_CHAR_BRACE_R):
break
else:
EdkLogger.error("FdfParser", FORMAT_INVALID, File=self.FileName, Line=self.CurrentLineNumber)
Obj.OverrideAttribs = Overrides
## _GetOptRomFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
def _GetOptRomFileStatement(self, Obj):
if not self._IsKeyword("FILE"):
return False
FfsFileObj = OptRomFileStatement()
if not self._IsKeyword("EFI") and not self._IsKeyword(BINARY_FILE_TYPE_BIN):
raise Warning.Expected("Binary type (EFI/BIN)", self.FileName, self.CurrentLineNumber)
FfsFileObj.FileType = self._Token
if not self._GetNextToken():
raise Warning.Expected("File path", self.FileName, self.CurrentLineNumber)
FfsFileObj.FileName = self._Token
if FfsFileObj.FileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(FfsFileObj.FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
if FfsFileObj.FileType == 'EFI':
self._GetOptRomOverrides(FfsFileObj)
Obj.FfsList.append(FfsFileObj)
return True
## _GetCapInFd() method
#
# Get Cap list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval CapList List of Capsule in FD
#
def _GetCapInFd (self, FdName):
CapList = []
if FdName.upper() in self.Profile.FdDict:
FdObj = self.Profile.FdDict[FdName.upper()]
for elementRegion in FdObj.RegionList:
if elementRegion.RegionType == 'CAPSULE':
for elementRegionData in elementRegion.RegionDataList:
if elementRegionData.endswith(".cap"):
continue
if elementRegionData is not None and elementRegionData.upper() not in CapList:
CapList.append(elementRegionData.upper())
return CapList
## _GetReferencedFdCapTuple() method
#
# Get FV and FD list referenced by a capsule image
#
# @param self The object pointer
# @param CapObj Capsule section to be searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def _GetReferencedFdCapTuple(self, CapObj, RefFdList = [], RefFvList = []):
for CapsuleDataObj in CapObj.CapsuleDataList:
if hasattr(CapsuleDataObj, 'FvName') and CapsuleDataObj.FvName is not None and CapsuleDataObj.FvName.upper() not in RefFvList:
RefFvList.append (CapsuleDataObj.FvName.upper())
elif hasattr(CapsuleDataObj, 'FdName') and CapsuleDataObj.FdName is not None and CapsuleDataObj.FdName.upper() not in RefFdList:
RefFdList.append (CapsuleDataObj.FdName.upper())
elif CapsuleDataObj.Ffs is not None:
if isinstance(CapsuleDataObj.Ffs, FileStatement):
if CapsuleDataObj.Ffs.FvName is not None and CapsuleDataObj.Ffs.FvName.upper() not in RefFvList:
RefFvList.append(CapsuleDataObj.Ffs.FvName.upper())
elif CapsuleDataObj.Ffs.FdName is not None and CapsuleDataObj.Ffs.FdName.upper() not in RefFdList:
RefFdList.append(CapsuleDataObj.Ffs.FdName.upper())
else:
self._GetReferencedFdFvTupleFromSection(CapsuleDataObj.Ffs, RefFdList, RefFvList)
## _GetFvInFd() method
#
# Get FV list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval FvList list of FV in FD
#
def _GetFvInFd (self, FdName):
FvList = []
if FdName.upper() in self.Profile.FdDict:
FdObj = self.Profile.FdDict[FdName.upper()]
for elementRegion in FdObj.RegionList:
if elementRegion.RegionType == BINARY_FILE_TYPE_FV:
for elementRegionData in elementRegion.RegionDataList:
if elementRegionData.endswith(".fv"):
continue
if elementRegionData is not None and elementRegionData.upper() not in FvList:
FvList.append(elementRegionData.upper())
return FvList
## _GetReferencedFdFvTuple() method
#
# Get FD and FV list referenced by a FFS file
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def _GetReferencedFdFvTuple(self, FvObj, RefFdList = [], RefFvList = []):
for FfsObj in FvObj.FfsList:
if isinstance(FfsObj, FileStatement):
if FfsObj.FvName is not None and FfsObj.FvName.upper() not in RefFvList:
RefFvList.append(FfsObj.FvName.upper())
elif FfsObj.FdName is not None and FfsObj.FdName.upper() not in RefFdList:
RefFdList.append(FfsObj.FdName.upper())
else:
self._GetReferencedFdFvTupleFromSection(FfsObj, RefFdList, RefFvList)
## _GetReferencedFdFvTupleFromSection() method
#
# Get FD and FV list referenced by a FFS section
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param FdList referenced FD by section
# @param FvList referenced FV by section
#
def _GetReferencedFdFvTupleFromSection(self, FfsFile, FdList = [], FvList = []):
SectionStack = list(FfsFile.SectionList)
while SectionStack != []:
SectionObj = SectionStack.pop()
if isinstance(SectionObj, FvImageSection):
if SectionObj.FvName is not None and SectionObj.FvName.upper() not in FvList:
FvList.append(SectionObj.FvName.upper())
if SectionObj.Fv is not None and SectionObj.Fv.UiFvName is not None and SectionObj.Fv.UiFvName.upper() not in FvList:
FvList.append(SectionObj.Fv.UiFvName.upper())
self._GetReferencedFdFvTuple(SectionObj.Fv, FdList, FvList)
if isinstance(SectionObj, CompressSection) or isinstance(SectionObj, GuidSection):
SectionStack.extend(SectionObj.SectionList)
## CycleReferenceCheck() method
#
# Check whether cycle reference exists in FDF
#
# @param self The object pointer
# @retval True cycle reference exists
    #   @retval False       No cycle reference exists
#
def CycleReferenceCheck(self):
#
# Check the cycle between FV and FD image
#
MaxLength = len (self.Profile.FvDict)
for FvName in self.Profile.FvDict:
LogStr = "\nCycle Reference Checking for FV: %s\n" % FvName
            RefFvStack = {FvName}
FdAnalyzedList = set()
Index = 0
while RefFvStack and Index < MaxLength:
Index = Index + 1
FvNameFromStack = RefFvStack.pop()
if FvNameFromStack.upper() in self.Profile.FvDict:
FvObj = self.Profile.FvDict[FvNameFromStack.upper()]
else:
continue
RefFdList = []
RefFvList = []
self._GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
for RefFdName in RefFdList:
if RefFdName in FdAnalyzedList:
continue
LogStr += "FV %s contains FD %s\n" % (FvNameFromStack, RefFdName)
FvInFdList = self._GetFvInFd(RefFdName)
if FvInFdList != []:
for FvNameInFd in FvInFdList:
LogStr += "FD %s contains FV %s\n" % (RefFdName, FvNameInFd)
if FvNameInFd not in RefFvStack:
RefFvStack.add(FvNameInFd)
if FvName in RefFvStack or FvNameFromStack in RefFvStack:
EdkLogger.info(LogStr)
return True
FdAnalyzedList.add(RefFdName)
for RefFvName in RefFvList:
LogStr += "FV %s contains FV %s\n" % (FvNameFromStack, RefFvName)
if RefFvName not in RefFvStack:
RefFvStack.add(RefFvName)
if FvName in RefFvStack or FvNameFromStack in RefFvStack:
EdkLogger.info(LogStr)
return True
#
# Check the cycle between Capsule and FD image
#
MaxLength = len (self.Profile.CapsuleDict)
for CapName in self.Profile.CapsuleDict:
#
# Capsule image to be checked.
#
LogStr = "\n\n\nCycle Reference Checking for Capsule: %s\n" % CapName
RefCapStack = {CapName}
FdAnalyzedList = set()
FvAnalyzedList = set()
Index = 0
while RefCapStack and Index < MaxLength:
Index = Index + 1
CapNameFromStack = RefCapStack.pop()
if CapNameFromStack.upper() in self.Profile.CapsuleDict:
CapObj = self.Profile.CapsuleDict[CapNameFromStack.upper()]
else:
continue
RefFvList = []
RefFdList = []
self._GetReferencedFdCapTuple(CapObj, RefFdList, RefFvList)
FvListLength = 0
FdListLength = 0
while FvListLength < len (RefFvList) or FdListLength < len (RefFdList):
for RefFdName in RefFdList:
if RefFdName in FdAnalyzedList:
continue
LogStr += "Capsule %s contains FD %s\n" % (CapNameFromStack, RefFdName)
for CapNameInFd in self._GetCapInFd(RefFdName):
LogStr += "FD %s contains Capsule %s\n" % (RefFdName, CapNameInFd)
if CapNameInFd not in RefCapStack:
                                RefCapStack.add(CapNameInFd)
if CapName in RefCapStack or CapNameFromStack in RefCapStack:
EdkLogger.info(LogStr)
return True
for FvNameInFd in self._GetFvInFd(RefFdName):
LogStr += "FD %s contains FV %s\n" % (RefFdName, FvNameInFd)
if FvNameInFd not in RefFvList:
RefFvList.append(FvNameInFd)
FdAnalyzedList.add(RefFdName)
#
# the number of the parsed FV and FD image
#
FvListLength = len (RefFvList)
FdListLength = len (RefFdList)
for RefFvName in RefFvList:
if RefFvName in FvAnalyzedList:
continue
LogStr += "Capsule %s contains FV %s\n" % (CapNameFromStack, RefFvName)
if RefFvName.upper() in self.Profile.FvDict:
FvObj = self.Profile.FvDict[RefFvName.upper()]
else:
continue
self._GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
FvAnalyzedList.add(RefFvName)
return False
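    ## GetAllIncludedFile() method
    #
    #   Return the module level AllIncludeFileList, i.e. the include files
    #   recorded while the FDF file was preprocessed (the list itself is
    #   maintained elsewhere in this module).
    #
    #   @param  self        The object pointer
    #   @retval list        All included file records
    #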
def GetAllIncludedFile (self):
global AllIncludeFileList
return AllIncludeFileList
if __name__ == "__main__":
import sys
try:
test_file = sys.argv[1]
except IndexError as v:
print("Usage: %s filename" % sys.argv[0])
sys.exit(1)
parser = FdfParser(test_file)
try:
parser.ParseFile()
parser.CycleReferenceCheck()
except Warning as X:
print(str(X))
else:
print("Success!")
| apache-2.0 | -8,537,984,819,681,910,000 | 40.938762 | 223 | 0.573885 | false |
d0c-s4vage/pfp | tests/utils.py | 1 | 2362 | #!/usr/bin/env python
# encoding: utf-8
import six
import sys
import pfp
import pfp.utils
import unittest
import functools
class PfpTestMeta(type):
def __init__(cls, name, bases, dict_):
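        # For every test_* method defined on the class, also register a
        # test_*_with_string_data variant that feeds pfp.parse a plain
        # string instead of a stream (see _create_string_data_test below).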
for attr_name, attr_val in six.iteritems(dict_):
if not attr_name.startswith("test_"):
continue
if not hasattr(attr_val, "__call__"):
continue
new_func_name = attr_name + "_with_string_data"
new_func = cls._create_string_data_test(attr_val)
setattr(cls, new_func_name, new_func)
return super(PfpTestMeta, cls).__init__(name, bases, dict_)
def _create_string_data_test(cls, method):
"""Wrap the test method in a new function that causes _test_parse_build
to use _stream=False as the default in order to test string data
as input to pfp.parse
"""
        @functools.wraps(method)
def new_method(self, *args, **kwargs):
self._test_parse_build = self._test_parse_build_with_string
try:
res = method(self, *args, **kwargs)
finally:
                self._test_parse_build = cls._test_parse_build_orig
            return res
return new_method
@six.add_metaclass(PfpTestMeta)
class PfpTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
# create two versions of each test
unittest.TestCase.__init__(self, *args, **kwargs)
def _test_parse_build_with_string(self, *args, **kwargs):
kwargs["_stream"] = False
return self._test_parse_build_orig(*args, **kwargs)
def _test_parse_build(
self,
data,
template,
stdout=None,
debug=False,
predefines=False,
_stream=True,
printf=True,
):
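        """Parse data against template (a "LittleEndian();" prefix is added)
        and return the resulting dom. When stdout is given, capture whatever
        the template prints and assert that it equals stdout.
        """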
if stdout is not None:
fake_stdout = sys.stdout = six.StringIO()
if _stream:
data = six.StringIO(data)
# defaults to LittleEndian
template = "LittleEndian();" + template
dom = pfp.parse(
data, template, debug=debug, predefines=predefines, printf=printf
)
if stdout is not None:
sys.stdout = sys.__stdout__
output = fake_stdout.getvalue()
self.assertEqual(output, stdout)
return dom
_test_parse_build_orig = _test_parse_build
| mit | -7,788,077,788,386,281,000 | 26.788235 | 79 | 0.574936 | false |
folti/subuser | logic/subuserlib/commands.py | 1 | 2023 | # -*- coding: utf-8 -*-
"""
This module helps us figure out which subuser subcommands can be called.
"""
#external imports
import os
#internal imports
import subuserlib.executablePath
import subuserlib.paths
def getBuiltIn():
"""
Get a list of the names of the built in subuser commands.
"""
try:
commands = set(os.listdir(subuserlib.paths.getSubuserCommandsDir()))
return [command[8:-3] for command in commands if command.endswith(".py") and command.startswith("subuser-")] # Filter out non-.py files and remove the .py suffixes and the "subuser-" prefixes.
except OSError:
return []
def getExternal():
"""
Return the list of "external" subuser commands. These are not built in commands but rather stand alone executables which appear in the user's $PATH and who's names start with "subuser-"
"""
def isPathToCommand(path):
directory, executableName = os.path.split(path)
return executableName.startswith("subuser-")
externalCommandPaths = subuserlib.executablePath.queryPATH(isPathToCommand)
externalCommands = []
subuserPrefixLength=len("subuser-")
for externalCommandPath in externalCommandPaths:
commandDir, executableName = os.path.split(externalCommandPath)
commandName = executableName[subuserPrefixLength:]
if commandName.endswith(".py"):
commandName=commandName[:-3]
externalCommands.append(commandName)
return list(set(externalCommands)) # remove duplicate entries
def getCommands():
"""
Returns a list of commands that may be called by the user.
"""
return list(set(getBuiltIn() + getExternal()))
def getPath(command):
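  """
  Return the path to the file that implements the given command: the built in
  command script if it exists, otherwise the external "subuser-"+command
  executable found on the user's $PATH.
  """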
builtInCommandPath = os.path.join(subuserlib.paths.getSubuserCommandsDir(),"subuser-" + command + ".py")
if os.path.exists(builtInCommandPath):
return (builtInCommandPath)
else:
externalCommandPath = subuserlib.executablePath.which("subuser-"+command)
if externalCommandPath:
return externalCommandPath
else:
return subuserlib.executablePath.which("subuser-"+command+".py")
| lgpl-3.0 | 4,134,394,350,383,250,000 | 35.781818 | 196 | 0.739001 | false |
abranches/backmonitor | backmonitor/protocol.py | 1 | 1832 | import logging
from twisted.internet.protocol import Factory, Protocol
from frame import decode_frame
from message import decode_message
log = logging.getLogger(__name__)
class ConnectionManager(object):
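    """
    Per-connection state: buffers incoming bytes, decodes complete frames as
    they arrive and forwards each decoded message to the backmonitor instance.
    """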
def __init__(self, backmonitor, addr):
self.backmonitor = backmonitor
self.addr = addr
self._buffer = bytes()
self._open = False
self.bytes_received = 0
self.frames_received = 0
@property
def open(self):
return self._open
def _on_data_received(self, data):
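        # Append the new bytes to the buffer and decode as many complete
        # frames as possible; decode_frame() reports 0 consumed bytes when
        # the buffered data does not yet hold a full frame.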
log.debug("_on_data_received(), data length=%d" % len(data))
self._buffer += data
while self._buffer:
consumed_bytes, frame = decode_frame(self._buffer)
if consumed_bytes == 0:
return
self.bytes_received += consumed_bytes
self._buffer = self._buffer[consumed_bytes:]
self._process_frame(frame)
def _process_frame(self, frame):
log.debug("Processing new frame")
message = decode_message(frame)
self.backmonitor.on_new_message(message)
self.frames_received += 1
class BackmonitorTwistedProtocol(Protocol):
def __init__(self, factory, conn_manager):
self.factory = factory
self.conn_manager = conn_manager
def connectionMade(self):
log.debug("New connection estabilished")
self.conn_manager._open = True
self.factory.connected_peers += 1
def dataReceived(self, data):
self.conn_manager._on_data_received(data)
class BackmonitorTwistedFactory(Factory):
def __init__(self, backmonitor):
self.connected_peers = 0
self.backmonitor = backmonitor
def buildProtocol(self, addr):
return BackmonitorTwistedProtocol(self, ConnectionManager(self.backmonitor, addr))
| apache-2.0 | 6,756,100,460,570,981,000 | 26.757576 | 90 | 0.637555 | false |
vntarasov/openpilot | selfdrive/registration.py | 1 | 2605 | import os
import json
from datetime import datetime, timedelta
from selfdrive.swaglog import cloudlog
from selfdrive.version import version, terms_version, training_version, get_git_commit, get_git_branch, get_git_remote
from common.hardware import HARDWARE
from common.api import api_get
from common.params import Params
from common.file_helpers import mkdirs_exists_ok
from common.basedir import PERSIST
def register():
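  """Persist version/build params, make sure the personal RSA keypair exists
  in the persist partition, then exchange a short lived signed register token
  with the v2/pilotauth endpoint for a dongle ID. Returns the dongle ID, or
  None if registration fails and no previous ID was cached."""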
params = Params()
params.put("Version", version)
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_git_commit(default=""))
params.put("GitBranch", get_git_branch(default=""))
params.put("GitRemote", get_git_remote(default=""))
params.put("SubscriberInfo", HARDWARE.get_subscriber_info())
# create a key for auth
# your private key is kept on your device persist partition and never sent to our servers
# do not erase your persist partition
if not os.path.isfile(PERSIST+"/comma/id_rsa.pub"):
cloudlog.warning("generating your personal RSA key")
mkdirs_exists_ok(PERSIST+"/comma")
assert os.system("openssl genrsa -out "+PERSIST+"/comma/id_rsa.tmp 2048") == 0
assert os.system("openssl rsa -in "+PERSIST+"/comma/id_rsa.tmp -pubout -out "+PERSIST+"/comma/id_rsa.tmp.pub") == 0
os.rename(PERSIST+"/comma/id_rsa.tmp", PERSIST+"/comma/id_rsa")
os.rename(PERSIST+"/comma/id_rsa.tmp.pub", PERSIST+"/comma/id_rsa.pub")
# make key readable by app users (ai.comma.plus.offroad)
os.chmod(PERSIST+'/comma/', 0o755)
os.chmod(PERSIST+'/comma/id_rsa', 0o744)
dongle_id = params.get("DongleId", encoding='utf8')
public_key = open(PERSIST+"/comma/id_rsa.pub").read()
# create registration token
# in the future, this key will make JWTs directly
private_key = open(PERSIST+"/comma/id_rsa").read()
# late import
import jwt
register_token = jwt.encode({'register': True, 'exp': datetime.utcnow() + timedelta(hours=1)}, private_key, algorithm='RS256')
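  # The token above is a short-lived RS256 JWT whose payload is just
  # {'register': True, 'exp': <now + 1 hour>}; the server is presumably expected
  # to verify it against the public key sent below (assumption about the backend).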
try:
cloudlog.info("getting pilotauth")
resp = api_get("v2/pilotauth/", method='POST', timeout=15,
imei=HARDWARE.get_imei(0), imei2=HARDWARE.get_imei(1), serial=HARDWARE.get_serial(), public_key=public_key, register_token=register_token)
dongleauth = json.loads(resp.text)
dongle_id = dongleauth["dongle_id"]
params.put("DongleId", dongle_id)
return dongle_id
except Exception:
cloudlog.exception("failed to authenticate")
if dongle_id is not None:
return dongle_id
else:
return None
if __name__ == "__main__":
print(register())
| mit | 2,335,180,383,859,445,000 | 37.308824 | 157 | 0.70595 | false |
s1na/darkoob | darkoob/book/views.py | 1 | 2366 | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.template import RequestContext
from django.db import transaction
from django.utils import simplejson
from darkoob.book.models import Book, Author
from darkoob.book.forms import NewReviewForm
from darkoob.social.views import common_context
from darkoob.migration.models import Migration
from darkoob.book.models import Review
def page(request, book_id, book_title):
try:
book = Book.objects.get(id=book_id)
except Book.DoesNotExist:
raise Http404
template = 'book/book_page.html'
reviews = Review.objects.filter(book=book).order_by("-rating_score")
count = range(1, len(reviews) + 1)
if request.is_ajax():
template = 'book/reviews.html'
context = {
'new_review_form': NewReviewForm(),
'book': book,
'rate': book.rating.get_rating(),
'reviews': reviews,
'count': count[::-1],
'migrations': Migration.objects.filter(book=book),
}
common_context(request, context)
m = Migration()
return render(request, template, context)
from avatar.templatetags import avatar_tags
def book_lookup(request):
results = []
if request.method == "GET":
        if u'query' in request.GET:
            value = request.GET[u'query']
            model_results = Book.objects.filter(title__icontains=value)
            results = [{'book_title': x.title, 'book_id': x.id, 'photo': x.thumb.url, 'author_name': x.author_names()} for x in model_results]
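            # Illustrative response shape (values below are made up; they depend
            # on the Book model):
            #   [{"book_title": "Dune", "book_id": 3,
            #     "photo": "/media/thumbs/dune.jpg", "author_name": "Frank Herbert"}]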
to_json = []
jt = simplejson.dumps(results)
print jt
return HttpResponse(jt, mimetype='application/json')
def author_lookup(request):
results = []
if request.method == "GET":
        if u'query' in request.GET:
            value = request.GET[u'query']
            model_results = Author.objects.filter(name__icontains=value)
            results = [x.name for x in model_results]
to_json = []
jt = simplejson.dumps(results)
print jt
return HttpResponse(jt, mimetype='application/json')
| mit | 4,478,514,506,639,604,000 | 30.546667 | 145 | 0.677515 | false |
GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/providers/openstack/swift.py | 1 | 4141 | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to Swift Storage Service."""
import os
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
flags.DEFINE_boolean('openstack_swift_insecure', False,
'Allow swiftclient to access Swift service without \n'
'having to verify the SSL certificate')
FLAGS = flags.FLAGS
SWIFTCLIENT_LIB_VERSION = 'python-swiftclient_lib_version'
class SwiftStorageService(object_storage_service.ObjectStorageService):
"""Interface to OpenStack Swift."""
STORAGE_NAME = providers.OPENSTACK
def __init__(self):
self.swift_command_prefix = ''
def PrepareService(self, location):
openstack_creds_set = ('OS_AUTH_URL' in os.environ,
'OS_TENANT_NAME' in os.environ,
'OS_USERNAME' in os.environ,
'OS_PASSWORD' in os.environ,)
if not all(openstack_creds_set):
raise errors.Benchmarks.MissingObjectCredentialException(
'OpenStack credentials not found in environment variables')
self.swift_command_parts = [
'--os-auth-url', os.environ['OS_AUTH_URL'],
'--os-tenant-name', os.environ['OS_TENANT_NAME'],
'--os-username', os.environ['OS_USERNAME'],
'--os-password', os.environ['OS_PASSWORD']]
if FLAGS.openstack_swift_insecure:
self.swift_command_parts.append('--insecure')
self.swift_command_prefix = ' '.join(self.swift_command_parts)
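    # Illustrative prefix (credential values are placeholders):
    #   --os-auth-url http://keystone.example:5000/v2.0 --os-tenant-name demo
    #   --os-username perfkit --os-password secret [--insecure]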
def MakeBucket(self, bucket, raise_on_failure=True):
_, stderr, ret_code = vm_util.IssueCommand(
['swift'] + self.swift_command_parts + ['post', bucket],
raise_on_failure=False)
if ret_code and raise_on_failure:
raise errors.Benchmarks.BucketCreationError(stderr)
def DeleteBucket(self, bucket):
self.EmptyBucket(bucket)
vm_util.IssueCommand(
['swift'] + self.swift_command_parts + ['delete', bucket],
raise_on_failure=False)
def Copy(self, src_url, dst_url):
"""See base class."""
raise NotImplementedError()
def CopyToBucket(self, src_path, bucket, object_path):
"""See base class."""
raise NotImplementedError()
def MakeRemoteCliDownloadUrl(self, bucket, object_path):
"""See base class."""
raise NotImplementedError()
def GenerateCliDownloadFileCommand(self, src_url, local_path):
"""See base class."""
raise NotImplementedError()
def List(self, buckets):
"""See base class."""
raise NotImplementedError()
def EmptyBucket(self, bucket):
vm_util.IssueCommand(
['swift'] + self.swift_command_parts + ['delete', bucket],
raise_on_failure=False)
def PrepareVM(self, vm):
vm.Install('swift_client')
def CleanupVM(self, vm):
vm.Uninstall('swift_client')
vm.RemoteCommand('/usr/bin/yes | sudo pip uninstall absl-py')
def CLIUploadDirectory(self, vm, directory, file_names, bucket):
return vm.RemoteCommand(
'time swift %s upload %s %s'
% (self.swift_command_prefix, bucket, directory))
def CLIDownloadBucket(self, vm, bucket, objects, dest):
return vm.RemoteCommand(
'time swift %s download %s -D %s'
% (self.swift_command_prefix, bucket, dest))
def Metadata(self, vm):
return {SWIFTCLIENT_LIB_VERSION:
linux_packages.GetPipPackageVersion(vm, 'python-swiftclient')}
| apache-2.0 | 5,507,729,754,466,135,000 | 34.09322 | 75 | 0.683651 | false |
dgwartney-io/import-io-api-python | importio2/commands/__init__.py | 1 | 1297 | #
# Copyright 2017 Import.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from importio2.commands.ad_base import AdBase
from importio2.commands.ad_base import AdDatabase
from importio2.commands.change_ownership import ChangeOwnership
from importio2.commands.create_crawl_run import CreateCrawlRun
from importio2.commands.csv_download import CsvDownload
from importio2.commands.csv_to_crawl_run import CsvToCrawlRun
from importio2.commands.csv_to_db import CsvToDatabase
from importio2.commands.csv_to_json import CsvToJson
from importio2.commands.json_to_crawl_run import JsonToCrawlRun
from importio2.commands.run_sql import RunSql
from importio2.commands.sql_to_csv import SqlToCsv
from importio2.commands.date_to_epoch import Date2Epoch
from importio2.commands.upload_data import UploadData
| apache-2.0 | -5,356,048,302,238,832,000 | 45.321429 | 74 | 0.814958 | false |
cpennington/edx-platform | lms/djangoapps/courseware/module_render.py | 1 | 55817 | """
Module rendering
"""
import hashlib
import json
import logging
import textwrap
from collections import OrderedDict
from functools import partial
import six
from completion import waffle as completion_waffle
from completion.models import BlockCompletion
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.middleware.csrf import CsrfViewMiddleware
from django.template.context_processors import csrf
from django.urls import reverse
from django.utils.text import slugify
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
from edx_django_utils.cache import RequestCache
from edx_django_utils.monitoring import set_custom_metrics_for_course_key, set_monitoring_transaction_name
from edx_proctoring.api import get_attempt_status_summary
from edx_proctoring.services import ProctoringService
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from edx_when.field_data import DateLookupFieldData
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from requests.auth import HTTPBasicAuth
from rest_framework.decorators import api_view
from rest_framework.exceptions import APIException
from six import text_type
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.django.request import django_to_webob_request, webob_to_django_response
from xblock.exceptions import NoSuchHandlerError, NoSuchViewError
from xblock.reference.plugins import FSService
from xblock.runtime import KvsFieldData
import static_replace
from capa.xqueue_interface import XQueueInterface
from lms.djangoapps.courseware.access import get_user_role, has_access
from lms.djangoapps.courseware.entrance_exams import user_can_skip_entrance_exam, user_has_passed_entrance_exam
from lms.djangoapps.courseware.masquerade import (
MasqueradingKeyValueStore,
filter_displayed_blocks,
is_masquerading_as_specific_student,
setup_masquerade
)
from lms.djangoapps.courseware.model_data import DjangoKeyValueStore, FieldDataCache
from edxmako.shortcuts import render_to_string
from lms.djangoapps.courseware.field_overrides import OverrideFieldData
from lms.djangoapps.courseware.services import UserStateService
from lms.djangoapps.grades.api import GradesUtilService
from lms.djangoapps.grades.api import signals as grades_signals
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig
from lms.djangoapps.lms_xblock.runtime import LmsModuleSystem
from lms.djangoapps.verify_student.services import XBlockVerificationService
from openedx.core.djangoapps.bookmarks.services import BookmarksService
from openedx.core.djangoapps.crawlers.models import CrawlersConfig
from openedx.core.djangoapps.credit.services import CreditService
from openedx.core.djangoapps.util.user_utils import SystemUser
from openedx.core.djangolib.markup import HTML
from openedx.core.lib.api.authentication import BearerAuthenticationAllowInactiveUser
from openedx.core.lib.api.view_utils import view_auth_classes
from openedx.core.lib.gating.services import GatingService
from openedx.core.lib.license import wrap_with_license
from openedx.core.lib.url_utils import quote_slashes, unquote_slashes
from openedx.core.lib.xblock_utils import (
add_staff_markup,
get_aside_from_xblock,
hash_resource,
is_xblock_aside,
replace_course_urls,
replace_jump_to_id_urls,
replace_static_urls
)
from openedx.core.lib.xblock_utils import request_token as xblock_request_token
from openedx.core.lib.xblock_utils import wrap_xblock
from openedx.features.course_duration_limits.access import course_expiration_wrapper
from openedx.features.discounts.utils import offer_banner_wrapper
from student.models import anonymous_id_for_user, user_by_anonymous_id
from student.roles import CourseBetaTesterRole
from track import contexts
from util import milestones_helpers
from util.json_request import JsonResponse
from xblock_django.user_service import DjangoXBlockUserService
from xmodule.contentstore.django import contentstore
from xmodule.error_module import ErrorDescriptor, NonStaffErrorDescriptor
from xmodule.exceptions import NotFoundError, ProcessingError
from xmodule.lti_module import LTIModule
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.util.sandboxing import can_execute_unsafe_code, get_python_lib_zip
from xmodule.x_module import XModuleDescriptor
log = logging.getLogger(__name__)
if settings.XQUEUE_INTERFACE.get('basic_auth') is not None:
REQUESTS_AUTH = HTTPBasicAuth(*settings.XQUEUE_INTERFACE['basic_auth'])
else:
REQUESTS_AUTH = None
XQUEUE_INTERFACE = XQueueInterface(
settings.XQUEUE_INTERFACE['url'],
settings.XQUEUE_INTERFACE['django_auth'],
REQUESTS_AUTH,
)
# TODO: course_id and course_key are used interchangeably in this file, which is wrong.
# Some brave person should make the variable names consistently someday, but the code's
# coupled enough that it's kind of tricky--you've been warned!
class LmsModuleRenderError(Exception):
"""
An exception class for exceptions thrown by module_render that don't fit well elsewhere
"""
pass
def make_track_function(request):
'''
Make a tracking function that logs what happened.
For use in ModuleSystem.
'''
import track.views
def function(event_type, event):
return track.views.server_track(request, event_type, event, page='x_module')
return function
def toc_for_course(user, request, course, active_chapter, active_section, field_data_cache):
'''
Create a table of contents from the module store
Return format:
{ 'chapters': [
{'display_name': name, 'url_name': url_name, 'sections': SECTIONS, 'active': bool},
],
'previous_of_active_section': {..},
'next_of_active_section': {..}
}
where SECTIONS is a list
[ {'display_name': name, 'url_name': url_name,
'format': format, 'due': due, 'active' : bool, 'graded': bool}, ...]
where previous_of_active_section and next_of_active_section have information on the
next/previous sections of the active section.
active is set for the section and chapter corresponding to the passed
parameters, which are expected to be url_names of the chapter+section.
Everything else comes from the xml, or defaults to "".
chapters with name 'hidden' are skipped.
NOTE: assumes that if we got this far, user has access to course. Returns
None if this is not the case.
field_data_cache must include data from the course module and 2 levels of its descendants
'''
with modulestore().bulk_operations(course.id):
course_module = get_module_for_descriptor(
user, request, course, field_data_cache, course.id, course=course
)
if course_module is None:
return None, None, None
toc_chapters = list()
chapters = course_module.get_display_items()
# Check for content which needs to be completed
# before the rest of the content is made available
required_content = milestones_helpers.get_required_content(course.id, user)
# The user may not actually have to complete the entrance exam, if one is required
if user_can_skip_entrance_exam(user, course):
required_content = [content for content in required_content if not content == course.entrance_exam_id]
previous_of_active_section, next_of_active_section = None, None
last_processed_section, last_processed_chapter = None, None
found_active_section = False
for chapter in chapters:
# Only show required content, if there is required content
# chapter.hide_from_toc is read-only (bool)
# xss-lint: disable=python-deprecated-display-name
display_id = slugify(chapter.display_name_with_default_escaped)
local_hide_from_toc = False
if required_content:
if six.text_type(chapter.location) not in required_content:
local_hide_from_toc = True
# Skip the current chapter if a hide flag is tripped
if chapter.hide_from_toc or local_hide_from_toc:
continue
sections = list()
for section in chapter.get_display_items():
# skip the section if it is hidden from the user
if section.hide_from_toc:
continue
is_section_active = (chapter.url_name == active_chapter and section.url_name == active_section)
if is_section_active:
found_active_section = True
section_context = {
# xss-lint: disable=python-deprecated-display-name
'display_name': section.display_name_with_default_escaped,
'url_name': section.url_name,
'format': section.format if section.format is not None else '',
'due': section.due,
'active': is_section_active,
'graded': section.graded,
}
_add_timed_exam_info(user, course, section, section_context)
# update next and previous of active section, if applicable
if is_section_active:
if last_processed_section:
previous_of_active_section = last_processed_section.copy()
previous_of_active_section['chapter_url_name'] = last_processed_chapter.url_name
elif found_active_section and not next_of_active_section:
next_of_active_section = section_context.copy()
next_of_active_section['chapter_url_name'] = chapter.url_name
sections.append(section_context)
last_processed_section = section_context
last_processed_chapter = chapter
toc_chapters.append({
# xss-lint: disable=python-deprecated-display-name
'display_name': chapter.display_name_with_default_escaped,
'display_id': display_id,
'url_name': chapter.url_name,
'sections': sections,
'active': chapter.url_name == active_chapter
})
return {
'chapters': toc_chapters,
'previous_of_active_section': previous_of_active_section,
'next_of_active_section': next_of_active_section,
}
def _add_timed_exam_info(user, course, section, section_context):
"""
Add in rendering context if exam is a timed exam (which includes proctored)
"""
section_is_time_limited = (
getattr(section, 'is_time_limited', False) and
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
)
if section_is_time_limited:
# call into edx_proctoring subsystem
# to get relevant proctoring information regarding this
# level of the courseware
#
# This will return None, if (user, course_id, content_id)
# is not applicable
timed_exam_attempt_context = None
try:
timed_exam_attempt_context = get_attempt_status_summary(
user.id,
six.text_type(course.id),
six.text_type(section.location)
)
except Exception as ex: # pylint: disable=broad-except
# safety net in case something blows up in edx_proctoring
            # as these are just informational descriptions, it is better
# to log and continue (which is safe) than to have it be an
# unhandled exception
log.exception(ex)
if timed_exam_attempt_context:
# yes, user has proctoring context about
# this level of the courseware
# so add to the accordion data context
section_context.update({
'proctoring': timed_exam_attempt_context,
})
def get_module(user, request, usage_key, field_data_cache,
position=None, log_if_not_found=True, wrap_xmodule_display=True,
grade_bucket_type=None, depth=0,
static_asset_path='', course=None, will_recheck_access=False):
"""
Get an instance of the xmodule class identified by location,
setting the state based on an existing StudentModule, or creating one if none
exists.
Arguments:
- user : User for whom we're getting the module
- request : current django HTTPrequest. Note: request.user isn't used for anything--all auth
and such works based on user.
- usage_key : A UsageKey object identifying the module to load
- field_data_cache : a FieldDataCache
- position : extra information from URL for user-specified
position within module
- log_if_not_found : If this is True, we log a debug message if we cannot find the requested xmodule.
- wrap_xmodule_display : If this is True, wrap the output display in a single div to allow for the
XModule javascript to be bound correctly
      - depth : number of levels of descendants to cache when loading this module.
         None means cache all descendants
- static_asset_path : static asset path to use (overrides descriptor's value); needed
by get_course_info_section, because info section modules
do not have a course as the parent module, and thus do not
inherit this lms key value.
- will_recheck_access : If True, the caller commits to re-checking access on each child XBlock
before rendering the content in order to display access error messages
to the user.
Returns: xmodule instance, or None if the user does not have access to the
module. If there's an error, will try to return an instance of ErrorModule
if possible. If not possible, return None.
"""
try:
descriptor = modulestore().get_item(usage_key, depth=depth)
return get_module_for_descriptor(user, request, descriptor, field_data_cache, usage_key.course_key,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
course=course, will_recheck_access=will_recheck_access)
except ItemNotFoundError:
if log_if_not_found:
log.debug("Error in get_module: ItemNotFoundError")
return None
except: # pylint: disable=W0702
# Something has gone terribly wrong, but still not letting it turn into a 500.
log.exception("Error in get_module")
return None
def display_access_messages(user, block, view, frag, context): # pylint: disable=W0613
"""
An XBlock wrapper that replaces the content fragment with a fragment or message determined by
the has_access check.
"""
blocked_prior_sibling = RequestCache('display_access_messages_prior_sibling')
load_access = has_access(user, 'load', block, block.scope_ids.usage_id.course_key)
if load_access:
blocked_prior_sibling.delete(block.parent)
return frag
prior_sibling = blocked_prior_sibling.get_cached_response(block.parent)
if prior_sibling.is_found and prior_sibling.value.error_code == load_access.error_code:
return Fragment(u"")
else:
blocked_prior_sibling.set(block.parent, load_access)
if load_access.user_fragment:
msg_fragment = load_access.user_fragment
elif load_access.user_message:
msg_fragment = Fragment(textwrap.dedent(HTML(u"""\
<div>{}</div>
""").format(load_access.user_message)))
else:
msg_fragment = Fragment(u"")
if load_access.developer_message and has_access(user, 'staff', block, block.scope_ids.usage_id.course_key):
msg_fragment.content += textwrap.dedent(HTML(u"""\
<div>{}</div>
""").format(load_access.developer_message))
return msg_fragment
def get_xqueue_callback_url_prefix(request):
"""
Calculates default prefix based on request, but allows override via settings
This is separated from get_module_for_descriptor so that it can be called
by the LMS before submitting background tasks to run. The xqueue callbacks
should go back to the LMS, not to the worker.
"""
prefix = '{proto}://{host}'.format(
proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http'),
host=request.get_host()
)
return settings.XQUEUE_INTERFACE.get('callback_url', prefix)
# pylint: disable=too-many-statements
def get_module_for_descriptor(user, request, descriptor, field_data_cache, course_key,
position=None, wrap_xmodule_display=True, grade_bucket_type=None,
static_asset_path='', disable_staff_debug_info=False,
course=None, will_recheck_access=False):
"""
Implements get_module, extracting out the request-specific functionality.
disable_staff_debug_info : If this is True, exclude staff debug information in the rendering of the module.
See get_module() docstring for further details.
"""
track_function = make_track_function(request)
xqueue_callback_url_prefix = get_xqueue_callback_url_prefix(request)
user_location = getattr(request, 'session', {}).get('country_code')
student_kvs = DjangoKeyValueStore(field_data_cache)
if is_masquerading_as_specific_student(user, course_key):
student_kvs = MasqueradingKeyValueStore(student_kvs, request.session)
student_data = KvsFieldData(student_kvs)
return get_module_for_descriptor_internal(
user=user,
descriptor=descriptor,
student_data=student_data,
course_id=course_key,
track_function=track_function,
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
user_location=user_location,
request_token=xblock_request_token(request),
disable_staff_debug_info=disable_staff_debug_info,
course=course,
will_recheck_access=will_recheck_access,
)
def get_module_system_for_user(
user,
student_data, # TODO
# Arguments preceding this comment have user binding, those following don't
descriptor,
course_id,
track_function,
xqueue_callback_url_prefix,
request_token,
position=None,
wrap_xmodule_display=True,
grade_bucket_type=None,
static_asset_path='',
user_location=None,
disable_staff_debug_info=False,
course=None,
will_recheck_access=False,
):
"""
Helper function that returns a module system and student_data bound to a user and a descriptor.
The purpose of this function is to factor out everywhere a user is implicitly bound when creating a module,
to allow an existing module to be re-bound to a user. Most of the user bindings happen when creating the
closures that feed the instantiation of ModuleSystem.
The arguments fall into two categories: those that have explicit or implicit user binding, which are user
    and student_data, and those that don't and are just present so that ModuleSystem can be instantiated, which
are all the other arguments. Ultimately, this isn't too different than how get_module_for_descriptor_internal
was before refactoring.
Arguments:
see arguments for get_module()
request_token (str): A token unique to the request use by xblock initialization
Returns:
(LmsModuleSystem, KvsFieldData): (module system, student_data) bound to, primarily, the user and descriptor
"""
def make_xqueue_callback(dispatch='score_update'):
"""
Returns fully qualified callback URL for external queueing system
"""
relative_xqueue_callback_url = reverse(
'xqueue_callback',
kwargs=dict(
course_id=text_type(course_id),
userid=str(user.id),
mod_id=text_type(descriptor.location),
dispatch=dispatch
),
)
return xqueue_callback_url_prefix + relative_xqueue_callback_url
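    # The callback built above is roughly of the form (host and ids illustrative;
    # the exact path comes from the 'xqueue_callback' URL pattern):
    #   https://lms.example.com/courses/<course_id>/xqueue/<user_id>/<usage_id>/score_update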
# Default queuename is course-specific and is derived from the course that
# contains the current module.
# TODO: Queuename should be derived from 'course_settings.json' of each course
xqueue_default_queuename = descriptor.location.org + '-' + descriptor.location.course
xqueue = {
'interface': XQUEUE_INTERFACE,
'construct_callback': make_xqueue_callback,
'default_queuename': xqueue_default_queuename.replace(' ', '_'),
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
}
def inner_get_module(descriptor):
"""
Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set.
Because it does an access check, it may return None.
"""
# TODO: fix this so that make_xqueue_callback uses the descriptor passed into
# inner_get_module, not the parent's callback. Add it as an argument....
return get_module_for_descriptor_internal(
user=user,
descriptor=descriptor,
student_data=student_data,
course_id=course_id,
track_function=track_function,
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
user_location=user_location,
request_token=request_token,
course=course,
will_recheck_access=will_recheck_access,
)
def get_event_handler(event_type):
"""
Return an appropriate function to handle the event.
Returns None if no special processing is required.
"""
handlers = {
'grade': handle_grade_event,
}
if completion_waffle.waffle().is_enabled(completion_waffle.ENABLE_COMPLETION_TRACKING):
handlers.update({
'completion': handle_completion_event,
'progress': handle_deprecated_progress_event,
})
return handlers.get(event_type)
def publish(block, event_type, event):
"""
A function that allows XModules to publish events.
"""
handle_event = get_event_handler(event_type)
if handle_event and not is_masquerading_as_specific_student(user, course_id):
handle_event(block, event)
else:
context = contexts.course_context_from_course_id(course_id)
if block.runtime.user_id:
context['user_id'] = block.runtime.user_id
context['asides'] = {}
for aside in block.runtime.get_asides(block):
if hasattr(aside, 'get_event_context'):
aside_event_info = aside.get_event_context(event_type, event)
if aside_event_info is not None:
context['asides'][aside.scope_ids.block_type] = aside_event_info
with tracker.get_tracker().context(event_type, context):
track_function(event_type, event)
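    # XBlocks reach publish() through their runtime; for example, a block marking
    # itself complete would call roughly
    #   self.runtime.publish(self, 'completion', {'completion': 1.0})
    # which is dispatched to handle_completion_event() below.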
def handle_completion_event(block, event):
"""
Submit a completion object for the block.
"""
if not completion_waffle.waffle().is_enabled(completion_waffle.ENABLE_COMPLETION_TRACKING):
raise Http404
else:
BlockCompletion.objects.submit_completion(
user=user,
block_key=block.scope_ids.usage_id,
completion=event['completion'],
)
def handle_grade_event(block, event):
"""
Submit a grade for the block.
"""
if not user.is_anonymous:
grades_signals.SCORE_PUBLISHED.send(
sender=None,
block=block,
user=user,
raw_earned=event['value'],
raw_possible=event['max_value'],
only_if_higher=event.get('only_if_higher'),
score_deleted=event.get('score_deleted'),
grader_response=event.get('grader_response')
)
def handle_deprecated_progress_event(block, event):
"""
DEPRECATED: Submit a completion for the block represented by the
progress event.
This exists to support the legacy progress extension used by
edx-solutions. New XBlocks should not emit these events, but instead
emit completion events directly.
"""
if not completion_waffle.waffle().is_enabled(completion_waffle.ENABLE_COMPLETION_TRACKING):
raise Http404
else:
requested_user_id = event.get('user_id', user.id)
if requested_user_id != user.id:
log.warning(u"{} tried to submit a completion on behalf of {}".format(user, requested_user_id))
return
# If blocks explicitly declare support for the new completion API,
# we expect them to emit 'completion' events,
# and we ignore the deprecated 'progress' events
# in order to avoid duplicate work and possibly conflicting semantics.
if not getattr(block, 'has_custom_completion', False):
BlockCompletion.objects.submit_completion(
user=user,
block_key=block.scope_ids.usage_id,
completion=1.0,
)
def rebind_noauth_module_to_user(module, real_user):
"""
A function that allows a module to get re-bound to a real user if it was previously bound to an AnonymousUser.
Will only work within a module bound to an AnonymousUser, e.g. one that's instantiated by the noauth_handler.
Arguments:
module (any xblock type): the module to rebind
real_user (django.contrib.auth.models.User): the user to bind to
Returns:
nothing (but the side effect is that module is re-bound to real_user)
"""
if user.is_authenticated:
err_msg = ("rebind_noauth_module_to_user can only be called from a module bound to "
"an anonymous user")
log.error(err_msg)
raise LmsModuleRenderError(err_msg)
field_data_cache_real_user = FieldDataCache.cache_for_descriptor_descendents(
course_id,
real_user,
module.descriptor,
asides=XBlockAsidesConfig.possible_asides(),
)
student_data_real_user = KvsFieldData(DjangoKeyValueStore(field_data_cache_real_user))
(inner_system, inner_student_data) = get_module_system_for_user(
user=real_user,
student_data=student_data_real_user, # These have implicit user bindings, rest of args considered not to
descriptor=module.descriptor,
course_id=course_id,
track_function=track_function,
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
user_location=user_location,
request_token=request_token,
course=course,
will_recheck_access=will_recheck_access,
)
module.descriptor.bind_for_student(
inner_system,
real_user.id,
[
partial(DateLookupFieldData, course_id=course_id, user=user),
partial(OverrideFieldData.wrap, real_user, course),
partial(LmsFieldData, student_data=inner_student_data),
],
)
module.descriptor.scope_ids = (
module.descriptor.scope_ids._replace(user_id=real_user.id)
)
module.scope_ids = module.descriptor.scope_ids # this is needed b/c NamedTuples are immutable
# now bind the module to the new ModuleSystem instance and vice-versa
module.runtime = inner_system
inner_system.xmodule_instance = module
# Build a list of wrapping functions that will be applied in order
# to the Fragment content coming out of the xblocks that are about to be rendered.
block_wrappers = []
if is_masquerading_as_specific_student(user, course_id):
block_wrappers.append(filter_displayed_blocks)
if settings.FEATURES.get("LICENSING", False):
block_wrappers.append(wrap_with_license)
# Wrap the output display in a single div to allow for the XModule
# javascript to be bound correctly
if wrap_xmodule_display is True:
block_wrappers.append(partial(
wrap_xblock,
'LmsRuntime',
extra_data={'course-id': text_type(course_id)},
usage_id_serializer=lambda usage_id: quote_slashes(text_type(usage_id)),
request_token=request_token,
))
# TODO (cpennington): When modules are shared between courses, the static
# prefix is going to have to be specific to the module, not the directory
# that the xml was loaded from
# Rewrite urls beginning in /static to point to course-specific content
block_wrappers.append(partial(
replace_static_urls,
getattr(descriptor, 'data_dir', None),
course_id=course_id,
static_asset_path=static_asset_path or descriptor.static_asset_path
))
# Allow URLs of the form '/course/' refer to the root of multicourse directory
# hierarchy of this course
block_wrappers.append(partial(replace_course_urls, course_id))
# this will rewrite intra-courseware links (/jump_to_id/<id>). This format
# is an improvement over the /course/... format for studio authored courses,
# because it is agnostic to course-hierarchy.
# NOTE: module_id is empty string here. The 'module_id' will get assigned in the replacement
# function, we just need to specify something to get the reverse() to work.
block_wrappers.append(partial(
replace_jump_to_id_urls,
course_id,
reverse('jump_to_id', kwargs={'course_id': text_type(course_id), 'module_id': ''}),
))
block_wrappers.append(partial(display_access_messages, user))
block_wrappers.append(partial(course_expiration_wrapper, user))
block_wrappers.append(partial(offer_banner_wrapper, user))
if settings.FEATURES.get('DISPLAY_DEBUG_INFO_TO_STAFF'):
if is_masquerading_as_specific_student(user, course_id):
# When masquerading as a specific student, we want to show the debug button
# unconditionally to enable resetting the state of the student we are masquerading as.
# We already know the user has staff access when masquerading is active.
staff_access = True
# To figure out whether the user has instructor access, we temporarily remove the
# masquerade_settings from the real_user. With the masquerading settings in place,
# the result would always be "False".
masquerade_settings = user.real_user.masquerade_settings
del user.real_user.masquerade_settings
user.real_user.masquerade_settings = masquerade_settings
else:
staff_access = has_access(user, 'staff', descriptor, course_id)
if staff_access:
block_wrappers.append(partial(add_staff_markup, user, disable_staff_debug_info))
# These modules store data using the anonymous_student_id as a key.
# To prevent loss of data, we will continue to provide old modules with
# the per-student anonymized id (as we have in the past),
# while giving selected modules a per-course anonymized id.
# As we have the time to manually test more modules, we can add to the list
# of modules that get the per-course anonymized id.
is_pure_xblock = isinstance(descriptor, XBlock) and not isinstance(descriptor, XModuleDescriptor)
module_class = getattr(descriptor, 'module_class', None)
is_lti_module = not is_pure_xblock and issubclass(module_class, LTIModule)
if (is_pure_xblock and not getattr(descriptor, 'requires_per_student_anonymous_id', False)) or is_lti_module:
anonymous_student_id = anonymous_id_for_user(user, course_id)
else:
anonymous_student_id = anonymous_id_for_user(user, None)
field_data = DateLookupFieldData(descriptor._field_data, course_id, user) # pylint: disable=protected-access
field_data = LmsFieldData(field_data, student_data)
user_is_staff = bool(has_access(user, u'staff', descriptor.location, course_id))
system = LmsModuleSystem(
track_function=track_function,
render_template=render_to_string,
static_url=settings.STATIC_URL,
xqueue=xqueue,
# TODO (cpennington): Figure out how to share info between systems
filestore=descriptor.runtime.resources_fs,
get_module=inner_get_module,
user=user,
debug=settings.DEBUG,
hostname=settings.SITE_NAME,
# TODO (cpennington): This should be removed when all html from
# a module is coming through get_html and is therefore covered
# by the replace_static_urls code below
replace_urls=partial(
static_replace.replace_static_urls,
data_directory=getattr(descriptor, 'data_dir', None),
course_id=course_id,
static_asset_path=static_asset_path or descriptor.static_asset_path,
),
replace_course_urls=partial(
static_replace.replace_course_urls,
course_key=course_id
),
replace_jump_to_id_urls=partial(
static_replace.replace_jump_to_id_urls,
course_id=course_id,
jump_to_id_base_url=reverse('jump_to_id', kwargs={'course_id': text_type(course_id), 'module_id': ''})
),
node_path=settings.NODE_PATH,
publish=publish,
anonymous_student_id=anonymous_student_id,
course_id=course_id,
cache=cache,
can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)),
get_python_lib_zip=(lambda: get_python_lib_zip(contentstore, course_id)),
# TODO: When we merge the descriptor and module systems, we can stop reaching into the mixologist (cpennington)
mixins=descriptor.runtime.mixologist._mixins, # pylint: disable=protected-access
wrappers=block_wrappers,
get_real_user=user_by_anonymous_id,
services={
'fs': FSService(),
'field-data': field_data,
'user': DjangoXBlockUserService(user, user_is_staff=user_is_staff),
'verification': XBlockVerificationService(),
'proctoring': ProctoringService(),
'milestones': milestones_helpers.get_service(),
'credit': CreditService(),
'bookmarks': BookmarksService(user=user),
'gating': GatingService(),
'grade_utils': GradesUtilService(course_id=course_id),
'user_state': UserStateService(),
},
get_user_role=lambda: get_user_role(user, course_id),
descriptor_runtime=descriptor._runtime, # pylint: disable=protected-access
rebind_noauth_module_to_user=rebind_noauth_module_to_user,
user_location=user_location,
request_token=request_token,
)
# pass position specified in URL to module through ModuleSystem
if position is not None:
try:
position = int(position)
except (ValueError, TypeError):
log.exception(u'Non-integer %r passed as position.', position)
position = None
system.set('position', position)
system.set(u'user_is_staff', user_is_staff)
system.set(u'user_is_admin', bool(has_access(user, u'staff', 'global')))
system.set(u'user_is_beta_tester', CourseBetaTesterRole(course_id).has_user(user))
system.set(u'days_early_for_beta', descriptor.days_early_for_beta)
# make an ErrorDescriptor -- assuming that the descriptor's system is ok
if has_access(user, u'staff', descriptor.location, course_id):
system.error_descriptor_class = ErrorDescriptor
else:
system.error_descriptor_class = NonStaffErrorDescriptor
return system, field_data
# TODO: Find all the places that this method is called and figure out how to
# get a loaded course passed into it
def get_module_for_descriptor_internal(user, descriptor, student_data, course_id,
track_function, xqueue_callback_url_prefix, request_token,
position=None, wrap_xmodule_display=True, grade_bucket_type=None,
static_asset_path='', user_location=None, disable_staff_debug_info=False,
course=None, will_recheck_access=False):
"""
Actually implement get_module, without requiring a request.
See get_module() docstring for further details.
Arguments:
request_token (str): A unique token for this request, used to isolate xblock rendering
"""
(system, student_data) = get_module_system_for_user(
user=user,
student_data=student_data, # These have implicit user bindings, the rest of args are considered not to
descriptor=descriptor,
course_id=course_id,
track_function=track_function,
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
user_location=user_location,
request_token=request_token,
disable_staff_debug_info=disable_staff_debug_info,
course=course,
will_recheck_access=will_recheck_access,
)
descriptor.bind_for_student(
system,
user.id,
[
partial(DateLookupFieldData, course_id=course_id, user=user),
partial(OverrideFieldData.wrap, user, course),
partial(LmsFieldData, student_data=student_data),
],
)
descriptor.scope_ids = descriptor.scope_ids._replace(user_id=user.id)
# Do not check access when it's a noauth request.
    # Note that the access check needs to happen after the descriptor is bound
# for the student, since there may be field override data for the student
# that affects xblock visibility.
user_needs_access_check = getattr(user, 'known', True) and not isinstance(user, SystemUser)
if user_needs_access_check:
access = has_access(user, 'load', descriptor, course_id)
# A descriptor should only be returned if either the user has access, or the user doesn't have access, but
# the failed access has a message for the user and the caller of this function specifies it will check access
# again. This allows blocks to show specific error message or upsells when access is denied.
caller_will_handle_access_error = (
not access
and will_recheck_access
and (access.user_message or access.user_fragment)
)
if access or caller_will_handle_access_error:
return descriptor
return None
return descriptor
def load_single_xblock(request, user_id, course_id, usage_key_string, course=None, will_recheck_access=False):
"""
Load a single XBlock identified by usage_key_string.
"""
usage_key = UsageKey.from_string(usage_key_string)
course_key = CourseKey.from_string(course_id)
usage_key = usage_key.map_into_course(course_key)
user = User.objects.get(id=user_id)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_key,
user,
modulestore().get_item(usage_key),
depth=0,
)
instance = get_module(
user,
request,
usage_key,
field_data_cache,
grade_bucket_type='xqueue',
course=course,
will_recheck_access=will_recheck_access
)
if instance is None:
msg = u"No module {0} for user {1}--access denied?".format(usage_key_string, user)
log.debug(msg)
raise Http404
return instance
@csrf_exempt
def xqueue_callback(request, course_id, userid, mod_id, dispatch):
'''
Entry point for graded results from the queueing system.
'''
data = request.POST.copy()
# Test xqueue package, which we expect to be:
# xpackage = {'xqueue_header': json.dumps({'lms_key':'secretkey',...}),
# 'xqueue_body' : 'Message from grader'}
for key in ['xqueue_header', 'xqueue_body']:
if key not in data:
raise Http404
header = json.loads(data['xqueue_header'])
if not isinstance(header, dict) or 'lms_key' not in header:
raise Http404
course_key = CourseKey.from_string(course_id)
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key, depth=0)
instance = load_single_xblock(request, userid, course_id, mod_id, course=course)
# Transfer 'queuekey' from xqueue response header to the data.
# This is required to use the interface defined by 'handle_ajax'
data.update({'queuekey': header['lms_key']})
# We go through the "AJAX" path
# So far, the only dispatch from xqueue will be 'score_update'
try:
# Can ignore the return value--not used for xqueue_callback
instance.handle_ajax(dispatch, data)
# Save any state that has changed to the underlying KeyValueStore
instance.save()
except:
log.exception("error processing ajax call")
raise
return HttpResponse("")
@csrf_exempt
@xframe_options_exempt
def handle_xblock_callback_noauth(request, course_id, usage_id, handler, suffix=None):
"""
Entry point for unauthenticated XBlock handlers.
"""
request.user.known = False
course_key = CourseKey.from_string(course_id)
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key, depth=0)
return _invoke_xblock_handler(request, course_id, usage_id, handler, suffix, course=course)
@csrf_exempt
@xframe_options_exempt
def handle_xblock_callback(request, course_id, usage_id, handler, suffix=None):
"""
Generic view for extensions. This is where AJAX calls go.
Arguments:
request (Request): Django request.
course_id (str): Course containing the block
usage_id (str)
handler (str)
suffix (str)
Raises:
HttpResponseForbidden: If the request method is not `GET` and user is not authenticated.
Http404: If the course is not found in the modulestore.
"""
# In this case, we are using Session based authentication, so we need to check CSRF token.
if request.user.is_authenticated:
error = CsrfViewMiddleware().process_view(request, None, (), {})
if error:
return error
# We are reusing DRF logic to provide support for JWT and Oauth2. We abandoned the idea of using DRF view here
# to avoid introducing backwards-incompatible changes.
# You can see https://github.com/edx/XBlock/pull/383 for more details.
else:
authentication_classes = (JwtAuthentication, BearerAuthenticationAllowInactiveUser)
authenticators = [auth() for auth in authentication_classes]
for authenticator in authenticators:
try:
user_auth_tuple = authenticator.authenticate(request)
except APIException:
log.exception(
u"XBlock handler %r failed to authenticate with %s", handler, authenticator.__class__.__name__
)
else:
if user_auth_tuple is not None:
request.user, _ = user_auth_tuple
break
# NOTE (CCB): Allow anonymous GET calls (e.g. for transcripts). Modifying this view is simpler than updating
# the XBlocks to use `handle_xblock_callback_noauth`, which is practically identical to this view.
if request.method != 'GET' and not (request.user and request.user.is_authenticated):
return HttpResponseForbidden('Unauthenticated')
request.user.known = request.user.is_authenticated
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise Http404(u'{} is not a valid course key'.format(course_id))
with modulestore().bulk_operations(course_key):
try:
course = modulestore().get_course(course_key)
except ItemNotFoundError:
raise Http404(u'{} does not exist in the modulestore'.format(course_id))
return _invoke_xblock_handler(request, course_id, usage_id, handler, suffix, course=course)
def get_module_by_usage_id(request, course_id, usage_id, disable_staff_debug_info=False, course=None):
"""
Gets a module instance based on its `usage_id` in a course, for a given request/user
Returns (instance, tracking_context)
"""
user = request.user
try:
course_id = CourseKey.from_string(course_id)
usage_key = UsageKey.from_string(unquote_slashes(usage_id)).map_into_course(course_id)
except InvalidKeyError:
raise Http404("Invalid location")
try:
descriptor = modulestore().get_item(usage_key)
descriptor_orig_usage_key, descriptor_orig_version = modulestore().get_block_original_usage(usage_key)
except ItemNotFoundError:
log.warn(
u"Invalid location for course id %s: %s",
usage_key.course_key,
usage_key
)
raise Http404
tracking_context = {
'module': {
# xss-lint: disable=python-deprecated-display-name
'display_name': descriptor.display_name_with_default_escaped,
'usage_key': six.text_type(descriptor.location),
}
}
# For blocks that are inherited from a content library, we add some additional metadata:
if descriptor_orig_usage_key is not None:
tracking_context['module']['original_usage_key'] = six.text_type(descriptor_orig_usage_key)
tracking_context['module']['original_usage_version'] = six.text_type(descriptor_orig_version)
unused_masquerade, user = setup_masquerade(request, course_id, has_access(user, 'staff', descriptor, course_id))
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id,
user,
descriptor,
read_only=CrawlersConfig.is_crawler(request),
)
instance = get_module_for_descriptor(
user,
request,
descriptor,
field_data_cache,
usage_key.course_key,
disable_staff_debug_info=disable_staff_debug_info,
course=course
)
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
log.debug(u"No module %s for user %s -- access denied?", usage_key, user)
raise Http404
return (instance, tracking_context)
def _invoke_xblock_handler(request, course_id, usage_id, handler, suffix, course=None):
"""
Invoke an XBlock handler, either authenticated or not.
Arguments:
request (HttpRequest): the current request
course_id (str): A string of the form org/course/run
usage_id (str): A string of the form i4x://org/course/category/name@revision
handler (str): The name of the handler to invoke
suffix (str): The suffix to pass to the handler when invoked
"""
# Check submitted files
files = request.FILES or {}
error_msg = _check_files_limits(files)
if error_msg:
return JsonResponse({'success': error_msg}, status=413)
# Make a CourseKey from the course_id, raising a 404 upon parse error.
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise Http404
set_custom_metrics_for_course_key(course_key)
with modulestore().bulk_operations(course_key):
try:
usage_key = UsageKey.from_string(unquote_slashes(usage_id))
except InvalidKeyError:
raise Http404
if is_xblock_aside(usage_key):
# Get the usage key for the block being wrapped by the aside (not the aside itself)
block_usage_key = usage_key.usage_key
else:
block_usage_key = usage_key
instance, tracking_context = get_module_by_usage_id(
request, course_id, six.text_type(block_usage_key), course=course
)
# Name the transaction so that we can view XBlock handlers separately in
# New Relic. The suffix is necessary for XModule handlers because the
# "handler" in those cases is always just "xmodule_handler".
nr_tx_name = "{}.{}".format(instance.__class__.__name__, handler)
nr_tx_name += "/{}".format(suffix) if (suffix and handler == "xmodule_handler") else ""
set_monitoring_transaction_name(nr_tx_name, group="Python/XBlock/Handler")
tracking_context_name = 'module_callback_handler'
req = django_to_webob_request(request)
try:
with tracker.get_tracker().context(tracking_context_name, tracking_context):
if is_xblock_aside(usage_key):
# In this case, 'instance' is the XBlock being wrapped by the aside, so
# the actual aside instance needs to be retrieved in order to invoke its
# handler method.
handler_instance = get_aside_from_xblock(instance, usage_key.aside_type)
else:
handler_instance = instance
resp = handler_instance.handle(handler, req, suffix)
if suffix == 'problem_check' \
and course \
and getattr(course, 'entrance_exam_enabled', False) \
and getattr(instance, 'in_entrance_exam', False):
ee_data = {'entrance_exam_passed': user_has_passed_entrance_exam(request.user, course)}
resp = append_data_to_webob_response(resp, ee_data)
except NoSuchHandlerError:
log.exception(u"XBlock %s attempted to access missing handler %r", instance, handler)
raise Http404
# If we can't find the module, respond with a 404
except NotFoundError:
log.exception("Module indicating to user that request doesn't exist")
raise Http404
# For XModule-specific errors, we log the error and respond with an error message
except ProcessingError as err:
log.warning("Module encountered an error while processing AJAX call",
exc_info=True)
return JsonResponse({'success': err.args[0]}, status=200)
# If any other error occurred, re-raise it to trigger a 500 response
except Exception:
log.exception("error executing xblock handler")
raise
return webob_to_django_response(resp)
@api_view(['GET'])
@view_auth_classes(is_authenticated=True)
def xblock_view(request, course_id, usage_id, view_name):
"""
Returns the rendered view of a given XBlock, with related resources
Returns a json object containing two keys:
html: The rendered html of the view
resources: A list of tuples where the first element is the resource hash, and
the second is the resource description
"""
if not settings.FEATURES.get('ENABLE_XBLOCK_VIEW_ENDPOINT', False):
log.warn("Attempt to use deactivated XBlock view endpoint -"
" see FEATURES['ENABLE_XBLOCK_VIEW_ENDPOINT']")
raise Http404
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise Http404("Invalid location")
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key)
instance, _ = get_module_by_usage_id(request, course_id, usage_id, course=course)
try:
fragment = instance.render(view_name, context=request.GET)
except NoSuchViewError:
log.exception(u"Attempt to render missing view on %s: %s", instance, view_name)
raise Http404
hashed_resources = OrderedDict()
for resource in fragment.resources:
hashed_resources[hash_resource(resource)] = resource
return JsonResponse({
'html': fragment.content,
'resources': list(hashed_resources.items()),
'csrf_token': six.text_type(csrf(request)['csrf_token']),
})
def _check_files_limits(files):
"""
Check if the files in a request are under the limits defined by
`settings.MAX_FILEUPLOADS_PER_INPUT` and
`settings.STUDENT_FILEUPLOAD_MAX_SIZE`.
    Returns None if files are correct or an error message otherwise.
"""
for fileinput_id in files.keys():
inputfiles = files.getlist(fileinput_id)
# Check number of files submitted
if len(inputfiles) > settings.MAX_FILEUPLOADS_PER_INPUT:
msg = u'Submission aborted! Maximum %d files may be submitted at once' % \
settings.MAX_FILEUPLOADS_PER_INPUT
return msg
# Check file sizes
for inputfile in inputfiles:
if inputfile.size > settings.STUDENT_FILEUPLOAD_MAX_SIZE: # Bytes
msg = u'Submission aborted! Your file "%s" is too large (max size: %d MB)' % \
(inputfile.name, settings.STUDENT_FILEUPLOAD_MAX_SIZE / (1000 ** 2))
return msg
return None
def append_data_to_webob_response(response, data):
"""
Appends data to a JSON webob response.
Arguments:
response (webob response object): the webob response object that needs to be modified
data (dict): dictionary containing data that needs to be appended to response body
Returns:
(webob response object): webob response with updated body.
"""
if getattr(response, 'content_type', None) == 'application/json':
json_input = response.body.decode('utf-8') if isinstance(response.body, bytes) else response.body
response_data = json.loads(json_input)
response_data.update(data)
response.body = json.dumps(response_data).encode('utf-8')
return response
| agpl-3.0 | -4,936,744,408,799,270,000 | 41.608397 | 119 | 0.650572 | false |
wcmitchell/insights-core | insights/parsers/rsyslog_conf.py | 1 | 2424 | """
RsyslogConf - file ``/etc/rsyslog.conf``
========================================
The rsyslog configuration files can include statements with two different
line based formats along with snippets of 'RainerScript' that can span
multiple lines.
See http://www.rsyslog.com/doc/master/configuration/basic_structure.html#statement-types
Due to high parsing complexity, this parser presents a simple line-based
view of the file that meets the needs of the current rules.
Example:
>>> content = '''
... :fromhost-ip, regex, "10.0.0.[0-9]" /tmp/my_syslog.log
... $ModLoad imtcp
    ... $InputTCPServerRun 10514
... '''.strip()
>>> from insights.tests import context_wrap
>>> rsl = RsyslogConf(context_wrap(content))
>>> len(rsl)
3
>>> len(list(rsl))
3
>>> any('imtcp' in n for n in rsl)
True
"""
from .. import Parser, parser, get_active_lines
import re
from insights.specs import rsyslog_conf
@parser(rsyslog_conf)
class RsyslogConf(Parser):
"""
Parses `/etc/rsyslog.conf` info simple lines.
Skips lines that begin with hash ("#") or are only whitespace.
Attributes:
data (list): List of lines in the file that don't start
with '#' and aren't whitespace.
config_items(dict): Configuration items opportunistically found in the
configuration file, with their values as given.
"""
def parse_content(self, content):
self.data = get_active_lines(content)
self.config_items = {}
# Config items are e.g. "$Word value #optional comment"
config_re = re.compile(r'^\s*\$(?P<name>\S+)\s+(?P<value>.*?)(?:\s+#.*)?$')
for line in self.data:
lstrip = line.strip()
match = config_re.match(lstrip)
if match:
self.config_items[match.group('name')] = match.group('value')
def config_val(self, item, default=None):
"""
Return the given configuration item, or the default if not defined.
Parameters:
item(str): The configuration item name
default: The default if the item is not found (defaults to None)
Returns:
The related value in the `config_items` dictionary.
"""
return self.config_items.get(item, default)
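    # Example, based on the sample content in the module docstring: after parsing
    # "$ModLoad imtcp", rsl.config_val('ModLoad') returns 'imtcp', while
    # rsl.config_val('WorkDirectory', '/tmp') falls back to the supplied default.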
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
yield d
| apache-2.0 | -6,795,303,004,103,961,000 | 29.683544 | 88 | 0.610149 | false |
kieranjol/IFIscripts | seq.py | 1 | 1751 | #!/usr/bin/env python
import subprocess
import sys
import os
import argparse
from glob import glob
parser = argparse.ArgumentParser(description='Generate v210/mov file from image sequence.'
'Written by Kieran O\'Leary.')
parser.add_argument('input', help='file path of parent directory')
parser.add_argument('-p', action='store_true', help='Use the Apple ProRes 4:2:2 codec instead of v210')
parser.add_argument('-f', action='store_true', help='choose an alternative framerate')
args = parser.parse_args()
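# Example invocation (hypothetical path):
#   python seq.py /path/to/scan_directory -p -f
# builds a ProRes .mov from the image sequence and prompts for an alternative framerate.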
source_directory = args.input
if not os.path.isdir(args.input):
print('Please provide a directory as input, not a file')
sys.exit()
os.chdir(source_directory)
images = (
glob('*.tif') +
glob('*.tiff') +
glob('*.dpx')
)
extension = os.path.splitext(images[0])[1]
numberless_filename = images[0].split("_")[0:-1]
ffmpeg_friendly_name = ''
counter = 0
while counter < len(numberless_filename):
ffmpeg_friendly_name += numberless_filename[counter] + '_'
counter += 1
dirname = os.path.dirname(source_directory)
output = dirname + '/%s.mov' % os.path.split(source_directory)[-1]
ffmpeg_friendly_name += '%06d' + extension
codec = 'v210'
if args.p:
codec = 'prores'
# the script will choose 24fps as default
cmd = ['ffmpeg','-f','image2','-framerate','24', '-i', ffmpeg_friendly_name,'-c:v',codec,output]
# adding the choice of an alternative fps here through argparse
if args.f:
fps = raw_input('what alternative framerate do you require? 16,18,21,25?')
cmd = ['ffmpeg','-f','image2','-framerate',fps, '-i', ffmpeg_friendly_name,'-c:v',codec,output]
print cmd
subprocess.call(cmd)
print 'Output file is located in %s' % output
| mit | 9,195,344,782,524,252,000 | 34.02 | 103 | 0.667047 | false |
quantum13/django-crypto-paid-chat | cryptochat/wsgi.py | 1 | 1431 | """
WSGI config for cryptochat project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "cryptochat.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cryptochat.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause | -6,685,163,713,553,861,000 | 43.71875 | 79 | 0.794549 | false |
apophys/freeipa | pylint_plugins.py | 1 | 10988 | #
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function
import copy
import os.path
import sys
import textwrap
from astroid import MANAGER, register_module_extender
from astroid import scoped_nodes
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import IAstroidChecker
from astroid.builder import AstroidBuilder
def register(linter):
linter.register_checker(IPAChecker(linter))
def _warning_already_exists(cls, member):
print(
"WARNING: member '{member}' in '{cls}' already exists".format(
cls="{}.{}".format(cls.root().name, cls.name), member=member),
file=sys.stderr
)
def fake_class(name_or_class_obj, members=()):
if isinstance(name_or_class_obj, scoped_nodes.Class):
cl = name_or_class_obj
else:
cl = scoped_nodes.Class(name_or_class_obj, None)
for m in members:
if isinstance(m, str):
if m in cl.locals:
_warning_already_exists(cl, m)
else:
cl.locals[m] = [scoped_nodes.Class(m, None)]
elif isinstance(m, dict):
for key, val in m.items():
assert isinstance(key, str), "key must be string"
if key in cl.locals:
_warning_already_exists(cl, key)
fake_class(cl.locals[key], val)
else:
cl.locals[key] = [fake_class(key, val)]
else:
# here can be used any astroid type
if m.name in cl.locals:
_warning_already_exists(cl, m.name)
else:
cl.locals[m.name] = [copy.copy(m)]
return cl
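# Illustrative example of the nested "members" structure fake_class() accepts:
# fake_class('Env', ['host', {'domain': dir(str)}]) builds a fake Env class with a plain
# 'host' member plus a 'domain' member that itself exposes all of str's attributes.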
fake_backend = {'Backend': [
{'wsgi_dispatch': ['mount']},
]}
NAMESPACE_ATTRS = ['Command', 'Object', 'Method', fake_backend, 'Updater',
'Advice']
fake_api_env = {'env': [
'host',
'realm',
'session_auth_duration',
'session_duration_type',
'kinit_lifetime',
]}
# this is due ipaserver.rpcserver.KerberosSession where api is undefined
fake_api = {'api': [fake_api_env] + NAMESPACE_ATTRS}
# Format of ipa_class_members below: 'module.Class': ['member', {'nested_member': [...]}, ...]
ipa_class_members = {
# Python standard library & 3rd party classes
'socket._socketobject': ['sendall'],
# IPA classes
'ipalib.base.NameSpace': [
'add',
'mod',
'del',
'show',
'find'
],
'ipalib.cli.Collector': ['__options'],
'ipalib.config.Env': [
{'__d': ['get']},
{'__done': ['add']},
'xmlrpc_uri',
'validate_api',
'startup_traceback',
'verbose',
'debug',
'server',
{'domain': dir(str)},
],
'ipalib.errors.ACIError': [
'info',
],
'ipalib.errors.ConversionError': [
'error',
],
'ipalib.errors.DatabaseError': [
'desc',
],
'ipalib.errors.NetworkError': [
'error',
],
'ipalib.errors.NotFound': [
'reason',
],
'ipalib.errors.PublicError': [
'msg',
'strerror',
'kw',
],
'ipalib.errors.SingleMatchExpected': [
'found',
],
'ipalib.errors.SkipPluginModule': [
'reason',
],
'ipalib.errors.ValidationError': [
'error',
],
'ipalib.errors.SchemaUpToDate': [
'fingerprint',
'ttl',
],
'ipalib.messages.PublicMessage': [
'msg',
'strerror',
'type',
'kw',
],
'ipalib.parameters.Param': [
'cli_name',
'cli_short_name',
'label',
'default',
'doc',
'required',
'multivalue',
'primary_key',
'normalizer',
'default_from',
'autofill',
'query',
'attribute',
'include',
'exclude',
'flags',
'hint',
'alwaysask',
'sortorder',
'option_group',
'no_convert',
'deprecated',
],
'ipalib.parameters.Bool': [
'truths',
'falsehoods'],
'ipalib.parameters.Data': [
'minlength',
'maxlength',
'length',
'pattern',
'pattern_errmsg',
],
'ipalib.parameters.Str': ['noextrawhitespace'],
'ipalib.parameters.Password': ['confirm'],
'ipalib.parameters.File': ['stdin_if_missing'],
'ipalib.parameters.Enum': ['values'],
'ipalib.parameters.Number': [
'minvalue',
'maxvalue',
],
'ipalib.parameters.Decimal': [
'precision',
'exponential',
'numberclass',
],
'ipalib.parameters.DNSNameParam': [
'only_absolute',
'only_relative',
],
'ipalib.parameters.Principal': [
'require_service',
],
'ipalib.plugable.API': [
fake_api_env,
] + NAMESPACE_ATTRS,
'ipalib.plugable.Plugin': [
'Object',
'Method',
'Updater',
'Advice',
],
'ipalib.util.ForwarderValidationError': [
'msg',
],
'ipaserver.plugins.dns.DNSRecord': [
'validatedns',
'normalizedns',
],
'ipaserver.rpcserver.KerberosSession': [
fake_api,
],
'ipatests.test_integration.base.IntegrationTest': [
'domain',
{'master': [
{'config': [
{'dirman_password': dir(str)},
{'admin_password': dir(str)},
{'admin_name': dir(str)},
{'dns_forwarder': dir(str)},
{'test_dir': dir(str)},
{'ad_admin_name': dir(str)},
{'ad_admin_password': dir(str)},
{'domain_level': dir(str)},
]},
{'domain': [
{'realm': dir(str)},
{'name': dir(str)},
]},
'hostname',
'ip',
'collect_log',
{'run_command': [
{'stdout_text': dir(str)},
'stderr_text',
'returncode',
]},
{'transport': ['put_file', 'file_exists']},
'put_file_contents',
'get_file_contents',
'ldap_connect',
]},
'replicas',
'clients',
'ad_domains',
]
}
def fix_ipa_classes(cls):
class_name_with_module = "{}.{}".format(cls.root().name, cls.name)
if class_name_with_module in ipa_class_members:
fake_class(cls, ipa_class_members[class_name_with_module])
MANAGER.register_transform(scoped_nodes.Class, fix_ipa_classes)
def pytest_config_transform():
"""pylint.config attribute
"""
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from _pytest.config import get_config
config = get_config()
'''))
register_module_extender(MANAGER, 'pytest', pytest_config_transform)
def ipaplatform_constants_transform():
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from ipaplatform.base.constants import constants
__all__ = ('constants',)
'''))
def ipaplatform_paths_transform():
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from ipaplatform.base.paths import paths
__all__ = ('paths',)
'''))
def ipaplatform_services_transform():
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from ipaplatform.base.services import knownservices
from ipaplatform.base.services import timedate_services
from ipaplatform.base.services import service
from ipaplatform.base.services import wellknownservices
from ipaplatform.base.services import wellknownports
__all__ = ('knownservices', 'timedate_services', 'service',
'wellknownservices', 'wellknownports')
'''))
def ipaplatform_tasks_transform():
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from ipaplatform.base.tasks import tasks
__all__ = ('tasks',)
'''))
register_module_extender(MANAGER, 'ipaplatform.constants',
ipaplatform_constants_transform)
register_module_extender(MANAGER, 'ipaplatform.paths',
ipaplatform_paths_transform)
register_module_extender(MANAGER, 'ipaplatform.services',
ipaplatform_services_transform)
register_module_extender(MANAGER, 'ipaplatform.tasks',
ipaplatform_tasks_transform)
class IPAChecker(BaseChecker):
__implements__ = IAstroidChecker
name = 'ipa'
msgs = {
'W9901': (
'Forbidden import %s (can\'t import from %s in %s)',
'ipa-forbidden-import',
'Used when an forbidden import is detected.',
),
}
options = (
(
'forbidden-imports',
{
'default': '',
'type': 'csv',
'metavar': '<path>[:<module>[:<module>...]][,<path>...]',
'help': 'Modules which are forbidden to be imported in the '
'given paths',
},
),
)
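    # Hypothetical example value for the option above:
    # "ipaclient:ipaserver,ipalib:ipaserver" forbids importing ipaserver anywhere
    # under the ipaclient/ and ipalib/ directories relative to this plugin.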
priority = -1
def open(self):
self._dir = os.path.abspath(os.path.dirname(__file__))
self._forbidden_imports = {self._dir: []}
for forbidden_import in self.config.forbidden_imports:
forbidden_import = forbidden_import.split(':')
path = os.path.join(self._dir, forbidden_import[0])
path = os.path.abspath(path)
modules = forbidden_import[1:]
self._forbidden_imports[path] = modules
self._forbidden_imports_stack = []
def _get_forbidden_import_rule(self, node):
path = node.path
if path:
path = os.path.abspath(path)
while path.startswith(self._dir):
if path in self._forbidden_imports:
return path
path = os.path.dirname(path)
return self._dir
def visit_module(self, node):
self._forbidden_imports_stack.append(
self._get_forbidden_import_rule(node))
def leave_module(self, node):
self._forbidden_imports_stack.pop()
def _check_forbidden_imports(self, node, names):
path = self._forbidden_imports_stack[-1]
relpath = os.path.relpath(path, self._dir)
modules = self._forbidden_imports[path]
for module in modules:
module_prefix = module + '.'
for name in names:
if name == module or name.startswith(module_prefix):
self.add_message('ipa-forbidden-import',
args=(name, module, relpath), node=node)
@check_messages('ipa-forbidden-import')
def visit_import(self, node):
names = [n[0] for n in node.names]
self._check_forbidden_imports(node, names)
@check_messages('ipa-forbidden-import')
def visit_importfrom(self, node):
names = ['{}.{}'.format(node.modname, n[0]) for n in node.names]
self._check_forbidden_imports(node, names)
| gpl-3.0 | 3,321,435,523,477,805,600 | 27.246787 | 77 | 0.545777 | false |
solvo/organilab | src/authentication/migrations/0001_initial.py | 1 | 1044 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-21 07:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='FeedbackEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('explanation', models.TextField(blank=True, verbose_name='Explanation')),
('related_file', models.FileField(blank=True, upload_to='media/feedback_entries/', verbose_name='Related file')),
],
options={
'verbose_name_plural': 'Feedback entries',
'permissions': (('view_feedbackentry', 'Can see available feed back entry'),),
'verbose_name': 'Feedback entry',
},
),
]
| gpl-3.0 | 7,104,996,995,801,698,000 | 33.8 | 129 | 0.577586 | false |
tensorflow/model-optimization | tensorflow_model_optimization/python/core/sparsity/keras/test_utils.py | 1 | 5637 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utility to generate models for testing."""
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_model_optimization.python.core.sparsity.keras import prune
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_wrapper
keras = tf.keras
l = keras.layers
def _build_mnist_layer_list():
return [
l.Conv2D(
32, 5, padding='same', activation='relu', input_shape=(28, 28, 1)),
l.MaxPooling2D((2, 2), (2, 2), padding='same'),
l.BatchNormalization(),
l.Conv2D(64, 5, padding='same', activation='relu'),
l.MaxPooling2D((2, 2), (2, 2), padding='same'),
l.Flatten(),
l.Dense(1024, activation='relu'),
l.Dropout(0.4),
l.Dense(10, activation='softmax')
]
def _build_mnist_sequential_model():
return keras.Sequential(_build_mnist_layer_list())
def _build_mnist_functional_model():
# pylint: disable=missing-docstring
inp = keras.Input(shape=(28, 28, 1))
x = l.Conv2D(32, 5, padding='same', activation='relu')(inp)
x = l.MaxPooling2D((2, 2), (2, 2), padding='same')(x)
x = l.BatchNormalization()(x)
x = l.Conv2D(64, 5, padding='same', activation='relu')(x)
x = l.MaxPooling2D((2, 2), (2, 2), padding='same')(x)
x = l.Flatten()(x)
x = l.Dense(1024, activation='relu')(x)
x = l.Dropout(0.4)(x)
out = l.Dense(10, activation='softmax')(x)
return keras.models.Model([inp], [out])
def _build_mnist_layerwise_pruned_model(pruning_params):
if pruning_params is None:
raise ValueError('pruning_params should be provided.')
return keras.Sequential([
prune.prune_low_magnitude(
l.Conv2D(32, 5, padding='same', activation='relu'),
input_shape=(28, 28, 1),
**pruning_params),
l.MaxPooling2D((2, 2), (2, 2), padding='same'),
l.BatchNormalization(),
prune.prune_low_magnitude(
l.Conv2D(64, 5, padding='same', activation='relu'), **pruning_params),
l.MaxPooling2D((2, 2), (2, 2), padding='same'),
l.Flatten(),
prune.prune_low_magnitude(
l.Dense(1024, activation='relu'), **pruning_params),
l.Dropout(0.4),
prune.prune_low_magnitude(
l.Dense(10, activation='softmax'), **pruning_params)
])
def build_mnist_model(model_type, pruning_params=None):
return {
'sequential': _build_mnist_sequential_model(),
'functional': _build_mnist_functional_model(),
'layer_list': _build_mnist_layer_list(),
'layer_wise': _build_mnist_layerwise_pruned_model(pruning_params),
}[model_type]
def model_type_keys():
return ['sequential', 'functional', 'layer_list', 'layer_wise']
def list_to_named_parameters(param_name, options):
"""Convert list of options for parameter to input to @parameterized.named_parameters.
Arguments:
param_name: name of parameter
options: list of options for parameter
Returns:
named_params: input to @parameterized.named_parameters
Needed to stack multiple parameters (e.g. with keras run_all_modes).
"""
def snakecase_to_camelcase(value):
    # Non-comprehensive check for camelcase already.
if value[0].isupper() and '_' not in value:
return value
camelcase = ''
for s in value.split('_'):
camelcase += s.capitalize()
return camelcase
def name(s):
if isinstance(s, str):
return s
return s.__name__
named_params = []
for key in options:
named_params.append({
'testcase_name': snakecase_to_camelcase(name(key)),
param_name: key
})
return named_params
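# Illustrative example:
# list_to_named_parameters('model_type', ['sequential', 'functional']) returns
# [{'testcase_name': 'Sequential', 'model_type': 'sequential'},
#  {'testcase_name': 'Functional', 'model_type': 'functional'}]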
def _save_restore_keras_model(model):
_, keras_file = tempfile.mkstemp('.h5')
keras.models.save_model(model, keras_file)
with prune.prune_scope():
loaded_model = keras.models.load_model(keras_file)
return loaded_model
def _save_restore_tf_model(model):
tmpdir = tempfile.mkdtemp()
tf.keras.models.save_model(model, tmpdir, save_format='tf')
with prune.prune_scope():
loaded_model = tf.keras.models.load_model(tmpdir)
return loaded_model
def save_restore_fns():
return [_save_restore_keras_model, _save_restore_tf_model]
# Assertion/Sparsity Verification functions.
def _get_sparsity(weights):
return 1.0 - np.count_nonzero(weights) / float(weights.size)
def assert_model_sparsity(test_case, sparsity, model, rtol=1e-6, atol=1e-6):
for layer in model.layers:
if isinstance(layer, pruning_wrapper.PruneLowMagnitude):
for weight in layer.layer.get_prunable_weights():
test_case.assertAllClose(
sparsity, _get_sparsity(tf.keras.backend.get_value(weight)), rtol=rtol, atol=atol)
# Check if model does not have target sparsity.
def is_model_sparsity_not(sparsity, model):
for layer in model.layers:
if isinstance(layer, pruning_wrapper.PruneLowMagnitude):
for weight in layer.layer.get_prunable_weights():
if sparsity != _get_sparsity(tf.keras.backend.get_value(weight)):
return True
return False
| apache-2.0 | 1,986,405,175,058,615,300 | 29.972527 | 94 | 0.664006 | false |
finger563/editor | tests/flatProxyModel.py | 1 | 3113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtCore, QtGui
class FlatProxyModel(QtGui.QAbstractProxyModel):
def sourceDataChanged(self, topLeft, bottomRight):
self.dataChanged.emit(self.mapFromSource(topLeft),
self.mapFromSource(bottomRight))
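    # Depth-first walk of the source model: assigns each source index a flat row number
    # in m_rowMap and stores the reverse lookup in m_indexMap (both rebuilt when row == 0).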
def buildMap(self, model, parent=QtCore.QModelIndex(), row=0):
if row == 0:
self.m_rowMap = {}
self.m_indexMap = {}
rows = model.rowCount(parent)
for r in range(rows):
index = model.index(r, 0, parent)
print('row', row, 'item', model.data(index))
self.m_rowMap[index] = row
self.m_indexMap[row] = index
row = row + 1
if model.hasChildren(index):
row = self.buildMap(model, index, row)
return row
def setSourceModel(self, model):
QtGui.QAbstractProxyModel.setSourceModel(self, model)
self.buildMap(model)
model.dataChanged.connect(self.sourceDataChanged)
def mapFromSource(self, index):
if index not in self.m_rowMap:
return QtCore.QModelIndex()
# print('mapping to row', self.m_rowMap[index], flush = True)
return self.createIndex(self.m_rowMap[index], index.column())
def mapToSource(self, index):
if not index.isValid() or index.row() not in self.m_indexMap:
return QtCore.QModelIndex()
# print('mapping from row', index.row(), flush = True)
return self.m_indexMap[index.row()]
def columnCount(self, parent):
return QtGui.QAbstractProxyModel.sourceModel(self)\
.columnCount(self.mapToSource(parent))
def rowCount(self, parent):
# print('rows:', len(self.m_rowMap), flush=True)
return len(self.m_rowMap) if not parent.isValid() else 0
def index(self, row, column, parent):
# print('index for:', row, column, flush=True)
if parent.isValid():
return QtCore.QModelIndex()
return self.createIndex(row, column)
def parent(self, index):
return QtCore.QModelIndex()
def __init__(self, parent=None):
super(FlatProxyModel, self).__init__(parent)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
model = QtGui.QStandardItemModel()
names = ['Foo', 'Bar', 'Baz']
for first in names:
row = QtGui.QStandardItem(first)
for second in names:
row.appendRow(QtGui.QStandardItem(first+second))
model.appendRow(row)
proxy = FlatProxyModel()
proxy.setSourceModel(model)
nestedProxy = FlatProxyModel()
nestedProxy.setSourceModel(proxy)
w = QtGui.QWidget()
layout = QtGui.QHBoxLayout(w)
view = QtGui.QTreeView()
view.setModel(model)
view.expandAll()
view.header().hide()
layout.addWidget(view)
view = QtGui.QListView()
view.setModel(proxy)
layout.addWidget(view)
view = QtGui.QListView()
view.setModel(nestedProxy)
layout.addWidget(view)
w.show()
sys.exit(app.exec_())
| mit | 131,644,875,371,513,710 | 30.765306 | 78 | 0.610022 | false |
Jumpscale/play8 | sockettest.py | 1 | 1790 | # import nnpy
# import time
# s=nnpy.Socket(nnpy.AF_SP,nnpy.REP)
#
#
# s.bind('tcp://127.0.0.1:5555')
#
# # s.setsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET,value=1024*1024)
# # s.getsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET)
#
# counter=0
# while True:
# try:
# res=s.recv(flags=nnpy.DONTWAIT)
# counter+=1
# except Exception as e:
# if not str(e)=='Resource temporarily unavailable':
# raise(e)
# from IPython import embed
# print ("DEBUG NOW 9")
# embed()
# raise RuntimeError("stop debug here")
# time.sleep(1)
# print(counter)
# continue
#
# s.send("ok")
# # print(res)
from JumpScale import j
def MyMethod(hello):
import time
counter=0
while True:
time.sleep(1)
counter+=1
print("%s:%s"%(hello,counter))
import asyncio
import logging
import aionn
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
counter=0
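# reader() pulls names from the nanomsg PULL socket and starts a monitored process for
# each one via JumpScale's process manager, printing a running count of launched processes.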
async def reader(socket,counter):
while True:
# print('receiving...')
name = await socket.recv()
# print('received:', value)
p = j.core.processmanager.startProcess(method=MyMethod,args={"hello":name.decode()},name=name.decode())
counter+=1
print(counter)
async def logger():
counter=0
while True:
for key,p in j.core.processmanager.processes.items():
p.sync()
print(p.new_stdout)
counter+=1
await asyncio.sleep(1)
print("logger:%s"%counter)
async def main(loop):
    await asyncio.wait([reader(socket, counter), logger()])
socket = aionn.Socket(aionn.AF_SP, aionn.PULL)
socket.bind('tcp://*:5555')
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
| apache-2.0 | -7,030,116,780,493,325,000 | 22.246753 | 111 | 0.60838 | false |
NazarioJL/google-foobar | level_3/the_grandest_staircase_of_them_all/solution.py | 1 | 1603 | def answer(n):
    # subtract 1 to exclude the single-step sequence [n]; a valid staircase needs at least two steps
result = make_stairs_count(n) - 1
return result
def make_stairs(total_remaining):
"""Returns a list of all sequences of increasing values that add up to total_remaining"""
all_lists = []
def make_stairs_rec(prev_step_size, left, l):
if left == 0:
all_lists.append(l)
return
if left < 0:
return
for new_step_size in xrange(prev_step_size + 1, left + 1):
new_left = left - new_step_size
make_stairs_rec(new_step_size, new_left, l + [new_step_size])
return
make_stairs_rec(0, total_remaining, [])
return all_lists
def make_stairs_count(total_remaining):
"""Returns the count of all sequences of increasing values that add up to total_remaining
Since the problem only requires the count, this method will not keep track of the
actual sequence.
"""
# use for memoization
memo = {}
def make_stairs_count_rec(prev_step_size, remaining):
if remaining == 0:
return 1
if remaining < 0:
return 0
result = 0
for new_step_size in xrange(prev_step_size + 1, remaining + 1):
new_remaining = remaining - new_step_size
args = (new_step_size, new_remaining)
if args not in memo:
memo[args] = make_stairs_count_rec(new_step_size, new_remaining)
result += memo[args]
return result
all_count = make_stairs_count_rec(0, total_remaining)
return all_count
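# Illustrative check: for n = 5 the strictly increasing sequences are [5], [1, 4] and
# [2, 3], so make_stairs_count(5) == 3 and answer(5) == 2.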
| unlicense | 3,313,028,360,251,223,000 | 25.716667 | 93 | 0.59451 | false |
frogbywyplay/genbox_xintegtools | xintegtools/xreport/__init__.py | 1 | 1100 | #
# Copyright (C) 2006-2014 Wyplay, All Rights Reserved.
# This file is part of xintegtools.
#
# xintegtools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# xintegtools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see file COPYING.
# If not, see <http://www.gnu.org/licenses/>.
#
#
from __future__ import absolute_import
from xintegtools.xreport.report import XReport # noqa
from xintegtools.xreport.cmdline import XReportCmdline, XCompareCmdline # noqa
from xintegtools.xreport.output_xml import XReportXMLOutput, XCompareXMLOutput # noqa
from xintegtools.xreport.output_txt import XReportTXTOutput, XCompareTXTOutput # noqa
| gpl-2.0 | -7,658,095,238,941,135,000 | 41.307692 | 86 | 0.780909 | false |
DarkFenX/Pyfa | eos/saveddata/module.py | 1 | 46249 | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import math
from logbook import Logger
from sqlalchemy.orm import reconstructor, validates
import eos.db
from eos.const import FittingHardpoint, FittingModuleState, FittingSlot
from eos.effectHandlerHelpers import HandledCharge, HandledItem
from eos.modifiedAttributeDict import ChargeAttrShortcut, ItemAttrShortcut, ModifiedAttributeDict
from eos.saveddata.citadel import Citadel
from eos.saveddata.mutator import Mutator
from eos.utils.cycles import CycleInfo, CycleSequence
from eos.utils.default import DEFAULT
from eos.utils.float import floatUnerr
from eos.utils.spoolSupport import calculateSpoolup, resolveSpoolOptions
from eos.utils.stats import DmgTypes, RRTypes
pyfalog = Logger(__name__)
ProjectedMap = {
FittingModuleState.OVERHEATED: FittingModuleState.ACTIVE,
FittingModuleState.ACTIVE: FittingModuleState.OFFLINE,
FittingModuleState.OFFLINE: FittingModuleState.ACTIVE,
FittingModuleState.ONLINE: FittingModuleState.ACTIVE # Just in case
}
# Old state : New State
LocalMap = {
FittingModuleState.OVERHEATED: FittingModuleState.ACTIVE,
FittingModuleState.ACTIVE: FittingModuleState.ONLINE,
FittingModuleState.OFFLINE: FittingModuleState.ONLINE,
FittingModuleState.ONLINE: FittingModuleState.ACTIVE
}
# For system effects. They should only ever be online or offline
ProjectedSystem = {
FittingModuleState.OFFLINE: FittingModuleState.ONLINE,
FittingModuleState.ONLINE: FittingModuleState.OFFLINE
}
class Module(HandledItem, HandledCharge, ItemAttrShortcut, ChargeAttrShortcut):
"""An instance of this class represents a module together with its charge and modified attributes"""
MINING_ATTRIBUTES = ("miningAmount",)
SYSTEM_GROUPS = ("Effect Beacon", "MassiveEnvironments", "Abyssal Hazards", "Non-Interactable Object")
def __init__(self, item, baseItem=None, mutaplasmid=None):
"""Initialize a module from the program"""
self.itemID = item.ID if item is not None else None
self.baseItemID = baseItem.ID if baseItem is not None else None
self.mutaplasmidID = mutaplasmid.ID if mutaplasmid is not None else None
if baseItem is not None:
# we're working with a mutated module, need to get abyssal module loaded with the base attributes
            # Note: there may be a better way of doing this, such as a method on this class to convert(mutaplasmid). This
# will require a bit more research though, considering there has never been a need to "swap" out the item of a Module
# before, and there may be assumptions taken with regards to the item never changing (pre-calculated / cached results, for example)
self.__item = eos.db.getItemWithBaseItemAttribute(self.itemID, self.baseItemID)
self.__baseItem = baseItem
self.__mutaplasmid = mutaplasmid
else:
self.__item = item
self.__baseItem = baseItem
self.__mutaplasmid = mutaplasmid
if item is not None and self.isInvalid:
raise ValueError("Passed item is not a Module")
self.__charge = None
self.projected = False
self.projectionRange = None
self.state = FittingModuleState.ONLINE
self.build()
@reconstructor
def init(self):
"""Initialize a module from the database and validate"""
self.__item = None
self.__baseItem = None
self.__charge = None
self.__mutaplasmid = None
# we need this early if module is invalid and returns early
self.__slot = self.dummySlot
if self.itemID:
self.__item = eos.db.getItem(self.itemID)
if self.__item is None:
pyfalog.error("Item (id: {0}) does not exist", self.itemID)
return
if self.baseItemID:
self.__item = eos.db.getItemWithBaseItemAttribute(self.itemID, self.baseItemID)
self.__baseItem = eos.db.getItem(self.baseItemID)
self.__mutaplasmid = eos.db.getMutaplasmid(self.mutaplasmidID)
if self.__baseItem is None:
pyfalog.error("Base Item (id: {0}) does not exist", self.itemID)
return
if self.isInvalid:
pyfalog.error("Item (id: {0}) is not a Module", self.itemID)
return
if self.chargeID:
self.__charge = eos.db.getItem(self.chargeID)
self.build()
def build(self):
""" Builds internal module variables from both init's """
if self.__charge and self.__charge.category.name != "Charge":
self.__charge = None
self.__baseVolley = None
self.__baseRRAmount = None
self.__miningyield = None
self.__reloadTime = None
self.__reloadForce = None
self.__chargeCycles = None
self.__hardpoint = FittingHardpoint.NONE
self.__itemModifiedAttributes = ModifiedAttributeDict(parent=self)
self.__chargeModifiedAttributes = ModifiedAttributeDict(parent=self)
self.__slot = self.dummySlot # defaults to None
if self.__item:
self.__itemModifiedAttributes.original = self.__item.attributes
self.__itemModifiedAttributes.overrides = self.__item.overrides
self.__hardpoint = self.__calculateHardpoint(self.__item)
self.__slot = self.calculateSlot(self.__item)
# Instantiate / remove mutators if this is a mutated module
if self.__baseItem:
for x in self.mutaplasmid.attributes:
attr = self.item.attributes[x.name]
id = attr.ID
if id not in self.mutators: # create the mutator
Mutator(self, attr, attr.value)
# @todo: remove attributes that are no longer part of the mutaplasmid.
self.__itemModifiedAttributes.mutators = self.mutators
if self.__charge:
self.__chargeModifiedAttributes.original = self.__charge.attributes
self.__chargeModifiedAttributes.overrides = self.__charge.overrides
@classmethod
def buildEmpty(cls, slot):
empty = Module(None)
empty.__slot = slot
empty.dummySlot = slot
return empty
@classmethod
def buildRack(cls, slot, num=None):
empty = Rack(None)
empty.__slot = slot
empty.dummySlot = slot
empty.num = num
return empty
@property
def isEmpty(self):
return self.dummySlot is not None
@property
def hardpoint(self):
return self.__hardpoint
@property
def isInvalid(self):
# todo: validate baseItem as well if it's set.
if self.isEmpty:
return False
return (
self.__item is None or (
self.__item.category.name not in ("Module", "Subsystem", "Structure Module") and
self.__item.group.name not in self.SYSTEM_GROUPS) or
(self.item.isAbyssal and not self.isMutated))
@property
def isMutated(self):
return self.baseItemID and self.mutaplasmidID
@property
def numCharges(self):
return self.getNumCharges(self.charge)
def getNumCharges(self, charge):
if charge is None:
charges = 0
else:
chargeVolume = charge.volume
containerCapacity = self.item.capacity
if chargeVolume is None or containerCapacity is None:
charges = 0
else:
charges = int(floatUnerr(containerCapacity / chargeVolume))
return charges
@property
def numShots(self):
if self.charge is None:
return 0
if self.__chargeCycles is None and self.charge:
numCharges = self.numCharges
# Usual ammo like projectiles and missiles
if numCharges > 0 and "chargeRate" in self.itemModifiedAttributes:
self.__chargeCycles = self.__calculateAmmoShots()
# Frequency crystals (combat and mining lasers)
elif numCharges > 0 and "crystalsGetDamaged" in self.chargeModifiedAttributes:
self.__chargeCycles = self.__calculateCrystalShots()
# Scripts and stuff
else:
self.__chargeCycles = 0
return self.__chargeCycles
else:
return self.__chargeCycles
@property
def modPosition(self):
return self.getModPosition()
def getModPosition(self, fit=None):
# Pass in fit for reliability. When it's not passed, we rely on owner and owner
# is set by sqlalchemy during flush
fit = fit if fit is not None else self.owner
if fit:
container = fit.projectedModules if self.isProjected else fit.modules
try:
return container.index(self)
except ValueError:
return None
return None
@property
def isProjected(self):
if self.owner:
return self in self.owner.projectedModules
return None
@property
def isExclusiveSystemEffect(self):
return self.item.group.name in ("Effect Beacon", "Non-Interactable Object", "MassiveEnvironments")
@property
def isCapitalSize(self):
return self.getModifiedItemAttr("volume", 0) >= 4000
@property
def hpBeforeReload(self):
"""
If item is some kind of repairer with charges, calculate
HP it reps before going into reload.
"""
cycles = self.numShots
armorRep = self.getModifiedItemAttr("armorDamageAmount") or 0
shieldRep = self.getModifiedItemAttr("shieldBonus") or 0
if not cycles or (not armorRep and not shieldRep):
return 0
hp = round((armorRep + shieldRep) * cycles)
return hp
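    # Illustrative numbers: a repairer doing 400 HP per cycle with 8 charges loaded
    # reps roughly 3200 HP before it has to reload.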
def __calculateAmmoShots(self):
if self.charge is not None:
# Set number of cycles before reload is needed
# numcycles = math.floor(module_capacity / (module_volume * module_chargerate))
chargeRate = self.getModifiedItemAttr("chargeRate")
numCharges = self.numCharges
numShots = math.floor(numCharges / chargeRate)
else:
numShots = None
return numShots
def __calculateCrystalShots(self):
if self.charge is not None:
if self.getModifiedChargeAttr("crystalsGetDamaged") == 1:
# For depletable crystals, calculate average amount of shots before it's destroyed
hp = self.getModifiedChargeAttr("hp")
chance = self.getModifiedChargeAttr("crystalVolatilityChance")
damage = self.getModifiedChargeAttr("crystalVolatilityDamage")
crystals = self.numCharges
numShots = math.floor((crystals * hp) / (damage * chance))
else:
# Set 0 (infinite) for permanent crystals like t1 laser crystals
numShots = 0
else:
numShots = None
return numShots
@property
def maxRange(self):
attrs = ("maxRange", "shieldTransferRange", "powerTransferRange",
"energyDestabilizationRange", "empFieldRange",
"ecmBurstRange", "warpScrambleRange", "cargoScanRange",
"shipScanRange", "surveyScanRange")
maxRange = None
for attr in attrs:
maxRange = self.getModifiedItemAttr(attr, None)
if maxRange is not None:
break
if maxRange is not None:
if 'burst projector' in self.item.name.lower():
maxRange -= self.owner.ship.getModifiedItemAttr("radius")
return maxRange
missileMaxRangeData = self.missileMaxRangeData
if missileMaxRangeData is None:
return None
lowerRange, higherRange, higherChance = missileMaxRangeData
maxRange = lowerRange * (1 - higherChance) + higherRange * higherChance
return maxRange
@property
def missileMaxRangeData(self):
if self.charge is None:
return None
try:
chargeName = self.charge.group.name
except AttributeError:
pass
else:
if chargeName in ("Scanner Probe", "Survey Probe"):
return None
def calculateRange(maxVelocity, mass, agility, flightTime):
# Source: http://www.eveonline.com/ingameboard.asp?a=topic&threadID=1307419&page=1#15
# D_m = V_m * (T_m + T_0*[exp(- T_m/T_0)-1])
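            # The code below approximates that formula with a two-phase model: average
            # speed of maxVelocity / 2 while accelerating (acceleration lasts at most
            # mass * agility / 1000000 seconds), then full speed for the rest of the flight.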
accelTime = min(flightTime, mass * agility / 1000000)
# Average distance done during acceleration
duringAcceleration = maxVelocity / 2 * accelTime
# Distance done after being at full speed
fullSpeed = maxVelocity * (flightTime - accelTime)
maxRange = duringAcceleration + fullSpeed
return maxRange
maxVelocity = self.getModifiedChargeAttr("maxVelocity")
if not maxVelocity:
return None
shipRadius = self.owner.ship.getModifiedItemAttr("radius")
# Flight time has bonus based on ship radius, see https://github.com/pyfa-org/Pyfa/issues/2083
flightTime = floatUnerr(self.getModifiedChargeAttr("explosionDelay") / 1000 + shipRadius / maxVelocity)
mass = self.getModifiedChargeAttr("mass")
agility = self.getModifiedChargeAttr("agility")
lowerTime = math.floor(flightTime)
higherTime = math.ceil(flightTime)
lowerRange = calculateRange(maxVelocity, mass, agility, lowerTime)
higherRange = calculateRange(maxVelocity, mass, agility, higherTime)
        # FoF (friend-or-foe) range limit is supposedly calculated based on overview (surface-to-surface) range
if 'fofMissileLaunching' in self.charge.effects:
rangeLimit = self.getModifiedChargeAttr("maxFOFTargetRange")
if rangeLimit:
lowerRange = min(lowerRange, rangeLimit)
higherRange = min(higherRange, rangeLimit)
# Make range center-to-surface, as missiles spawn in the center of the ship
lowerRange = max(0, lowerRange - shipRadius)
higherRange = max(0, higherRange - shipRadius)
higherChance = flightTime - lowerTime
return lowerRange, higherRange, higherChance
@property
def falloff(self):
attrs = ("falloffEffectiveness", "falloff", "shipScanFalloff")
for attr in attrs:
falloff = self.getModifiedItemAttr(attr, None)
if falloff is not None:
return falloff
@property
def slot(self):
return self.__slot
@property
def itemModifiedAttributes(self):
return self.__itemModifiedAttributes
@property
def chargeModifiedAttributes(self):
return self.__chargeModifiedAttributes
@property
def item(self):
return self.__item if self.__item != 0 else None
@property
def baseItem(self):
return self.__baseItem
@property
def mutaplasmid(self):
return self.__mutaplasmid
@property
def charge(self):
return self.__charge if self.__charge != 0 else None
@charge.setter
def charge(self, charge):
self.__charge = charge
if charge is not None:
self.chargeID = charge.ID
self.__chargeModifiedAttributes.original = charge.attributes
self.__chargeModifiedAttributes.overrides = charge.overrides
else:
self.chargeID = None
self.__chargeModifiedAttributes.original = None
self.__chargeModifiedAttributes.overrides = {}
self.__itemModifiedAttributes.clear()
@property
def miningStats(self):
if self.__miningyield is None:
if self.isEmpty:
self.__miningyield = 0
else:
if self.state >= FittingModuleState.ACTIVE:
volley = self.getModifiedItemAttr("specialtyMiningAmount") or self.getModifiedItemAttr(
"miningAmount") or 0
if volley:
cycleParams = self.getCycleParameters()
if cycleParams is None:
self.__miningyield = 0
else:
cycleTime = cycleParams.averageTime
self.__miningyield = volley / (cycleTime / 1000.0)
else:
self.__miningyield = 0
else:
self.__miningyield = 0
return self.__miningyield
def isDealingDamage(self, ignoreState=False):
volleyParams = self.getVolleyParameters(ignoreState=ignoreState)
for volley in volleyParams.values():
if volley.total > 0:
return True
return False
def getVolleyParameters(self, spoolOptions=None, targetProfile=None, ignoreState=False):
if self.isEmpty or (self.state < FittingModuleState.ACTIVE and not ignoreState):
return {0: DmgTypes(0, 0, 0, 0)}
if self.__baseVolley is None:
self.__baseVolley = {}
dmgGetter = self.getModifiedChargeAttr if self.charge else self.getModifiedItemAttr
dmgMult = self.getModifiedItemAttr("damageMultiplier", 1)
# Some delay attributes have non-0 default value, so we have to pick according to effects
if {'superWeaponAmarr', 'superWeaponCaldari', 'superWeaponGallente', 'superWeaponMinmatar', 'lightningWeapon'}.intersection(self.item.effects):
dmgDelay = self.getModifiedItemAttr("damageDelayDuration", 0)
elif {'doomsdayBeamDOT', 'doomsdaySlash', 'doomsdayConeDOT'}.intersection(self.item.effects):
dmgDelay = self.getModifiedItemAttr("doomsdayWarningDuration", 0)
else:
dmgDelay = 0
dmgDuration = self.getModifiedItemAttr("doomsdayDamageDuration", 0)
dmgSubcycle = self.getModifiedItemAttr("doomsdayDamageCycleTime", 0)
# Reaper DD can damage each target only once
if dmgDuration != 0 and dmgSubcycle != 0 and 'doomsdaySlash' not in self.item.effects:
subcycles = math.floor(floatUnerr(dmgDuration / dmgSubcycle))
else:
subcycles = 1
for i in range(subcycles):
self.__baseVolley[dmgDelay + dmgSubcycle * i] = DmgTypes(
em=(dmgGetter("emDamage", 0)) * dmgMult,
thermal=(dmgGetter("thermalDamage", 0)) * dmgMult,
kinetic=(dmgGetter("kineticDamage", 0)) * dmgMult,
explosive=(dmgGetter("explosiveDamage", 0)) * dmgMult)
spoolType, spoolAmount = resolveSpoolOptions(spoolOptions, self)
spoolBoost = calculateSpoolup(
self.getModifiedItemAttr("damageMultiplierBonusMax", 0),
self.getModifiedItemAttr("damageMultiplierBonusPerCycle", 0),
self.rawCycleTime / 1000, spoolType, spoolAmount)[0]
spoolMultiplier = 1 + spoolBoost
adjustedVolley = {}
for volleyTime, volleyValue in self.__baseVolley.items():
adjustedVolley[volleyTime] = DmgTypes(
em=volleyValue.em * spoolMultiplier * (1 - getattr(targetProfile, "emAmount", 0)),
thermal=volleyValue.thermal * spoolMultiplier * (1 - getattr(targetProfile, "thermalAmount", 0)),
kinetic=volleyValue.kinetic * spoolMultiplier * (1 - getattr(targetProfile, "kineticAmount", 0)),
explosive=volleyValue.explosive * spoolMultiplier * (1 - getattr(targetProfile, "explosiveAmount", 0)))
return adjustedVolley
def getVolley(self, spoolOptions=None, targetProfile=None, ignoreState=False):
volleyParams = self.getVolleyParameters(spoolOptions=spoolOptions, targetProfile=targetProfile, ignoreState=ignoreState)
if len(volleyParams) == 0:
return DmgTypes(0, 0, 0, 0)
return volleyParams[min(volleyParams)]
def getDps(self, spoolOptions=None, targetProfile=None, ignoreState=False):
dmgDuringCycle = DmgTypes(0, 0, 0, 0)
cycleParams = self.getCycleParameters()
if cycleParams is None:
return dmgDuringCycle
volleyParams = self.getVolleyParameters(spoolOptions=spoolOptions, targetProfile=targetProfile, ignoreState=ignoreState)
avgCycleTime = cycleParams.averageTime
if len(volleyParams) == 0 or avgCycleTime == 0:
return dmgDuringCycle
for volleyValue in volleyParams.values():
dmgDuringCycle += volleyValue
dpsFactor = 1 / (avgCycleTime / 1000)
dps = DmgTypes(
em=dmgDuringCycle.em * dpsFactor,
thermal=dmgDuringCycle.thermal * dpsFactor,
kinetic=dmgDuringCycle.kinetic * dpsFactor,
explosive=dmgDuringCycle.explosive * dpsFactor)
return dps
def isRemoteRepping(self, ignoreState=False):
repParams = self.getRepAmountParameters(ignoreState=ignoreState)
for rrData in repParams.values():
if rrData:
return True
return False
def getRepAmountParameters(self, spoolOptions=None, ignoreState=False):
if self.isEmpty or (self.state < FittingModuleState.ACTIVE and not ignoreState):
return {}
remoteModuleGroups = {
"Remote Armor Repairer": "Armor",
"Ancillary Remote Armor Repairer": "Armor",
"Mutadaptive Remote Armor Repairer": "Armor",
"Remote Hull Repairer": "Hull",
"Remote Shield Booster": "Shield",
"Ancillary Remote Shield Booster": "Shield",
"Remote Capacitor Transmitter": "Capacitor"}
rrType = remoteModuleGroups.get(self.item.group.name)
if rrType is None:
return {}
if self.__baseRRAmount is None:
self.__baseRRAmount = {}
shieldAmount = 0
armorAmount = 0
hullAmount = 0
capacitorAmount = 0
if rrType == "Hull":
hullAmount += self.getModifiedItemAttr("structureDamageAmount", 0)
elif rrType == "Armor":
if self.item.group.name == "Ancillary Remote Armor Repairer" and self.charge:
mult = self.getModifiedItemAttr("chargedArmorDamageMultiplier", 1)
else:
mult = 1
armorAmount += self.getModifiedItemAttr("armorDamageAmount", 0) * mult
elif rrType == "Shield":
shieldAmount += self.getModifiedItemAttr("shieldBonus", 0)
elif rrType == "Capacitor":
capacitorAmount += self.getModifiedItemAttr("powerTransferAmount", 0)
rrDelay = 0 if rrType == "Shield" else self.rawCycleTime
self.__baseRRAmount[rrDelay] = RRTypes(shield=shieldAmount, armor=armorAmount, hull=hullAmount, capacitor=capacitorAmount)
spoolType, spoolAmount = resolveSpoolOptions(spoolOptions, self)
spoolBoost = calculateSpoolup(
self.getModifiedItemAttr("repairMultiplierBonusMax", 0),
self.getModifiedItemAttr("repairMultiplierBonusPerCycle", 0),
self.rawCycleTime / 1000, spoolType, spoolAmount)[0]
spoolMultiplier = 1 + spoolBoost
adjustedRRAmount = {}
for rrTime, rrAmount in self.__baseRRAmount.items():
if spoolMultiplier == 1:
adjustedRRAmount[rrTime] = rrAmount
else:
adjustedRRAmount[rrTime] = rrAmount * spoolMultiplier
return adjustedRRAmount
def getRemoteReps(self, spoolOptions=None, ignoreState=False, reloadOverride=None):
rrDuringCycle = RRTypes(0, 0, 0, 0)
cycleParams = self.getCycleParameters(reloadOverride=reloadOverride)
if cycleParams is None:
return rrDuringCycle
repAmountParams = self.getRepAmountParameters(spoolOptions=spoolOptions, ignoreState=ignoreState)
avgCycleTime = cycleParams.averageTime
if len(repAmountParams) == 0 or avgCycleTime == 0:
return rrDuringCycle
for rrAmount in repAmountParams.values():
rrDuringCycle += rrAmount
rrFactor = 1 / (avgCycleTime / 1000)
rps = rrDuringCycle * rrFactor
return rps
def getSpoolData(self, spoolOptions=None):
weaponMultMax = self.getModifiedItemAttr("damageMultiplierBonusMax", 0)
weaponMultPerCycle = self.getModifiedItemAttr("damageMultiplierBonusPerCycle", 0)
if weaponMultMax and weaponMultPerCycle:
spoolType, spoolAmount = resolveSpoolOptions(spoolOptions, self)
_, spoolCycles, spoolTime = calculateSpoolup(
weaponMultMax, weaponMultPerCycle,
self.rawCycleTime / 1000, spoolType, spoolAmount)
return spoolCycles, spoolTime
rrMultMax = self.getModifiedItemAttr("repairMultiplierBonusMax", 0)
rrMultPerCycle = self.getModifiedItemAttr("repairMultiplierBonusPerCycle", 0)
if rrMultMax and rrMultPerCycle:
spoolType, spoolAmount = resolveSpoolOptions(spoolOptions, self)
_, spoolCycles, spoolTime = calculateSpoolup(
rrMultMax, rrMultPerCycle,
self.rawCycleTime / 1000, spoolType, spoolAmount)
return spoolCycles, spoolTime
return 0, 0
@property
def reloadTime(self):
# Get reload time from attrs first, then use
# custom value specified otherwise (e.g. in effects)
moduleReloadTime = self.getModifiedItemAttr("reloadTime")
if moduleReloadTime is None:
moduleReloadTime = self.__reloadTime
return moduleReloadTime or 0.0
@reloadTime.setter
def reloadTime(self, milliseconds):
self.__reloadTime = milliseconds
@property
def forceReload(self):
return self.__reloadForce
@forceReload.setter
def forceReload(self, type):
self.__reloadForce = type
def fits(self, fit, hardpointLimit=True):
"""
        Determines whether this module can be fitted to the ship. Slot restrictions are always applied
        (too much code relies on that assumption); all other fitting restrictions are optional.
"""
slot = self.slot
if fit.getSlotsFree(slot) <= (0 if self.owner != fit else -1):
return False
fits = self.__fitRestrictions(fit, hardpointLimit)
if not fits and fit.ignoreRestrictions:
self.restrictionOverridden = True
fits = True
elif fits and fit.ignoreRestrictions:
self.restrictionOverridden = False
return fits
def __fitRestrictions(self, fit, hardpointLimit=True):
if not fit.canFit(self.item):
return False
# EVE doesn't let capital modules be fit onto subcapital hulls. Confirmed by CCP Larrikin that this is dictated
        # by the module's volume. See GH issue #1096
if not isinstance(fit.ship, Citadel) and fit.ship.getModifiedItemAttr("isCapitalSize", 0) != 1 and self.isCapitalSize:
return False
# If the mod is a subsystem, don't let two subs in the same slot fit
if self.slot == FittingSlot.SUBSYSTEM:
subSlot = self.getModifiedItemAttr("subSystemSlot")
for mod in fit.modules:
if mod is self:
continue
if mod.getModifiedItemAttr("subSystemSlot") == subSlot:
return False
# Check rig sizes
if self.slot == FittingSlot.RIG:
if self.getModifiedItemAttr("rigSize") != fit.ship.getModifiedItemAttr("rigSize"):
return False
# Check max group fitted
max = self.getModifiedItemAttr("maxGroupFitted", None)
if max is not None:
current = 0 # if self.owner != fit else -1 # Disabled, see #1278
for mod in fit.modules:
if (mod.item and mod.item.groupID == self.item.groupID and
self.getModPosition(fit) != mod.getModPosition(fit)):
current += 1
if current >= max:
return False
# Check this only if we're told to do so
if hardpointLimit:
if fit.getHardpointsFree(self.hardpoint) < 1:
return False
return True
def isValidState(self, state):
"""
Check if the state is valid for this module, without considering other modules at all
"""
# Check if we're within bounds
if state < -1 or state > 2:
return False
elif state >= FittingModuleState.ACTIVE and (not self.item.isType("active") or self.getModifiedItemAttr('activationBlocked') > 0):
return False
elif state == FittingModuleState.OVERHEATED and not self.item.isType("overheat"):
return False
else:
return True
def getMaxState(self, proposedState=None):
states = sorted((s for s in FittingModuleState if proposedState is None or s <= proposedState), reverse=True)
for state in states:
if self.isValidState(state):
return state
def canHaveState(self, state=None, projectedOnto=None):
"""
Check with other modules if there are restrictions that might not allow this module to be activated.
        Returns True if the state is allowed, or the maximum state the module can have if the proposed state is invalid.
"""
# If we're going to set module to offline, it should be fine for all cases
item = self.item
if state <= FittingModuleState.OFFLINE:
return True
# Check if the local module is over it's max limit; if it's not, we're fine
maxGroupOnline = self.getModifiedItemAttr("maxGroupOnline", None)
maxGroupActive = self.getModifiedItemAttr("maxGroupActive", None)
if maxGroupOnline is None and maxGroupActive is None and projectedOnto is None:
return True
        # The following applies only to local modules; we do not want to limit projected ones
if projectedOnto is None:
currOnline = 0
currActive = 0
group = item.group.name
maxState = None
for mod in self.owner.modules:
currItem = getattr(mod, "item", None)
if currItem is not None and currItem.group.name == group:
if mod.state >= FittingModuleState.ONLINE:
currOnline += 1
if mod.state >= FittingModuleState.ACTIVE:
currActive += 1
if maxGroupOnline is not None and currOnline > maxGroupOnline:
if maxState is None or maxState > FittingModuleState.OFFLINE:
maxState = FittingModuleState.OFFLINE
break
if maxGroupActive is not None and currActive > maxGroupActive:
if maxState is None or maxState > FittingModuleState.ONLINE:
maxState = FittingModuleState.ONLINE
return True if maxState is None else maxState
# For projected, we're checking if ship is vulnerable to given item
else:
            # Do not allow applying offensive modules to a ship with offensive module immunity, with a few exceptions
            # (all effects which apply an instant modification are exceptions, generally speaking)
if item.offensive and projectedOnto.ship.getModifiedItemAttr("disallowOffensiveModifiers") == 1:
offensiveNonModifiers = {"energyDestabilizationNew",
"leech",
"energyNosferatuFalloff",
"energyNeutralizerFalloff"}
if not offensiveNonModifiers.intersection(set(item.effects)):
return FittingModuleState.OFFLINE
            # If assistive modules are not allowed, do not allow applying them at all
if item.assistive and projectedOnto.ship.getModifiedItemAttr("disallowAssistance") == 1:
return FittingModuleState.OFFLINE
return True
def isValidCharge(self, charge):
# Check sizes, if 'charge size > module volume' it won't fit
if charge is None:
return True
chargeVolume = charge.volume
moduleCapacity = self.item.capacity
if chargeVolume is not None and moduleCapacity is not None and chargeVolume > moduleCapacity:
return False
itemChargeSize = self.getModifiedItemAttr("chargeSize")
if itemChargeSize > 0:
chargeSize = charge.getAttribute('chargeSize')
if itemChargeSize != chargeSize:
return False
chargeGroup = charge.groupID
for i in range(5):
itemChargeGroup = self.getModifiedItemAttr('chargeGroup' + str(i), None)
if itemChargeGroup is None:
continue
if itemChargeGroup == chargeGroup:
return True
return False
def getValidCharges(self):
validCharges = set()
for i in range(5):
itemChargeGroup = self.getModifiedItemAttr('chargeGroup' + str(i), None)
if itemChargeGroup is not None:
g = eos.db.getGroup(int(itemChargeGroup), eager="items.attributes")
if g is None:
continue
for singleItem in g.items:
if singleItem.published and self.isValidCharge(singleItem):
validCharges.add(singleItem)
return validCharges
@staticmethod
def __calculateHardpoint(item):
effectHardpointMap = {
"turretFitted" : FittingHardpoint.TURRET,
"launcherFitted": FittingHardpoint.MISSILE
}
if item is None:
return FittingHardpoint.NONE
for effectName, slot in effectHardpointMap.items():
if effectName in item.effects:
return slot
return FittingHardpoint.NONE
@staticmethod
def calculateSlot(item):
effectSlotMap = {
"rigSlot" : FittingSlot.RIG.value,
"loPower" : FittingSlot.LOW.value,
"medPower" : FittingSlot.MED.value,
"hiPower" : FittingSlot.HIGH.value,
"subSystem" : FittingSlot.SUBSYSTEM.value,
"serviceSlot": FittingSlot.SERVICE.value
}
if item is None:
return None
for effectName, slot in effectSlotMap.items():
if effectName in item.effects:
return slot
if item.group.name in Module.SYSTEM_GROUPS:
return FittingSlot.SYSTEM
return None
@validates("ID", "itemID", "ammoID")
def validator(self, key, val):
map = {
"ID" : lambda _val: isinstance(_val, int),
"itemID": lambda _val: _val is None or isinstance(_val, int),
"ammoID": lambda _val: isinstance(_val, int)
}
if not map[key](val):
raise ValueError(str(val) + " is not a valid value for " + key)
else:
return val
def clear(self):
self.__baseVolley = None
self.__baseRRAmount = None
self.__miningyield = None
self.__reloadTime = None
self.__reloadForce = None
self.__chargeCycles = None
self.itemModifiedAttributes.clear()
self.chargeModifiedAttributes.clear()
def calculateModifiedAttributes(self, fit, runTime, forceProjected=False, gang=False, forcedProjRange=DEFAULT):
# We will run the effect when two conditions are met:
# 1: It makes sense to run the effect
# The effect is either offline
# or the effect is passive and the module is in the online state (or higher)
# or the effect is active and the module is in the active state (or higher)
# or the effect is overheat and the module is in the overheated state (or higher)
# 2: the runtimes match
if self.projected or forceProjected:
context = "projected", "module"
projected = True
else:
context = ("module",)
projected = False
projectionRange = self.projectionRange if forcedProjRange is DEFAULT else forcedProjRange
if self.charge is not None:
            # fix for #82 and its regression #106
if not projected or (self.projected and not forceProjected) or gang:
for effect in self.charge.effects.values():
if (
effect.runTime == runTime and
effect.activeByDefault and (
effect.isType("offline") or
(effect.isType("passive") and self.state >= FittingModuleState.ONLINE) or
(effect.isType("active") and self.state >= FittingModuleState.ACTIVE)) and
(not gang or (gang and effect.isType("gang")))
):
contexts = ("moduleCharge",)
effect.handler(fit, self, contexts, projectionRange, effect=effect)
if self.item:
if self.state >= FittingModuleState.OVERHEATED:
for effect in self.item.effects.values():
if effect.runTime == runTime and \
effect.isType("overheat") \
and not forceProjected \
and effect.activeByDefault \
and ((gang and effect.isType("gang")) or not gang):
effect.handler(fit, self, context, projectionRange, effect=effect)
for effect in self.item.effects.values():
if effect.runTime == runTime and \
effect.activeByDefault and \
(effect.isType("offline") or
(effect.isType("passive") and self.state >= FittingModuleState.ONLINE) or
(effect.isType("active") and self.state >= FittingModuleState.ACTIVE)) \
and ((projected and effect.isType("projected")) or not projected) \
and ((gang and effect.isType("gang")) or not gang):
effect.handler(fit, self, context, projectionRange, effect=effect)
def getCycleParameters(self, reloadOverride=None):
"""Copied from new eos as well"""
# Determine if we'll take into account reload time or not
if reloadOverride is not None:
factorReload = reloadOverride
else:
factorReload = self.owner.factorReload if self.forceReload is None else self.forceReload
cycles_until_reload = self.numShots
if cycles_until_reload == 0:
cycles_until_reload = math.inf
active_time = self.rawCycleTime
if active_time == 0:
return None
forced_inactive_time = self.reactivationDelay
reload_time = self.reloadTime
# Effects which cannot be reloaded have the same processing whether
# caller wants to take reload time into account or not
if reload_time is None and cycles_until_reload < math.inf:
final_cycles = 1
early_cycles = cycles_until_reload - final_cycles
# Single cycle until effect cannot run anymore
if early_cycles == 0:
return CycleInfo(active_time, 0, 1, False)
# Multiple cycles with the same parameters
if forced_inactive_time == 0:
return CycleInfo(active_time, 0, cycles_until_reload, False)
# Multiple cycles with different parameters
return CycleSequence((
CycleInfo(active_time, forced_inactive_time, early_cycles, False),
CycleInfo(active_time, 0, final_cycles, False)
), 1)
# Module cycles the same way all the time in 3 cases:
# 1) caller doesn't want to take into account reload time
# 2) effect does not have to reload anything to keep running
# 3) effect has enough time to reload during inactivity periods
if (
not factorReload or
cycles_until_reload == math.inf or
forced_inactive_time >= reload_time
):
isInactivityReload = factorReload and forced_inactive_time >= reload_time
return CycleInfo(active_time, forced_inactive_time, math.inf, isInactivityReload)
# We've got to take reload into consideration
else:
final_cycles = 1
early_cycles = cycles_until_reload - final_cycles
            # If the effect has to reload after each of its cycles, then its
            # parameters are the same all the time
if early_cycles == 0:
return CycleInfo(active_time, reload_time, math.inf, True)
return CycleSequence((
CycleInfo(active_time, forced_inactive_time, early_cycles, False),
CycleInfo(active_time, reload_time, final_cycles, True)
), math.inf)
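    # Illustrative example (hypothetical numbers): for a module with a 5000 ms
    # cycle, 10 charges per reload, a 10000 ms reload time and no reactivation
    # delay, factoring in reload would yield
    # CycleSequence((CycleInfo(5000, 0, 9, False), CycleInfo(5000, 10000, 1, True)), math.inf),
    # i.e. nine plain cycles followed by one cycle that triggers a reload, repeated forever.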
@property
def rawCycleTime(self):
speed = max(
self.getModifiedItemAttr("speed", 0), # Most weapons
self.getModifiedItemAttr("duration", 0), # Most average modules
self.getModifiedItemAttr("durationSensorDampeningBurstProjector", 0),
self.getModifiedItemAttr("durationTargetIlluminationBurstProjector", 0),
self.getModifiedItemAttr("durationECMJammerBurstProjector", 0),
self.getModifiedItemAttr("durationWeaponDisruptionBurstProjector", 0)
)
return speed
@property
def disallowRepeatingAction(self):
return self.getModifiedItemAttr("disallowRepeatingActivation", 0)
@property
def reactivationDelay(self):
return self.getModifiedItemAttr("moduleReactivationDelay", 0)
@property
def capUse(self):
capNeed = self.getModifiedItemAttr("capacitorNeed")
if capNeed and self.state >= FittingModuleState.ACTIVE:
cycleParams = self.getCycleParameters()
if cycleParams is None:
return 0
cycleTime = cycleParams.averageTime
if cycleTime > 0:
capUsed = capNeed / (cycleTime / 1000.0)
return capUsed
else:
return 0
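    # Illustrative example (hypothetical figures): a module needing 40 GJ per
    # activation with a 5000 ms average cycle time draws 40 / (5000 / 1000) = 8 GJ/s.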
@staticmethod
def getProposedState(mod, click, proposedState=None):
pyfalog.debug("Get proposed state for module.")
if mod.slot == FittingSlot.SUBSYSTEM or mod.isEmpty:
return FittingModuleState.ONLINE
if mod.slot == FittingSlot.SYSTEM:
transitionMap = ProjectedSystem
else:
transitionMap = ProjectedMap if mod.projected else LocalMap
currState = mod.state
if proposedState is not None:
state = proposedState
elif click == "right":
state = FittingModuleState.OVERHEATED
elif click == "ctrl":
state = FittingModuleState.OFFLINE
else:
state = transitionMap[currState]
# If passive module tries to transition into online and fails,
# put it to passive instead
if not mod.isValidState(state) and currState == FittingModuleState.ONLINE:
state = FittingModuleState.OFFLINE
return mod.getMaxState(proposedState=state)
def __deepcopy__(self, memo):
item = self.item
if item is None:
copy = Module.buildEmpty(self.slot)
else:
copy = Module(self.item, self.baseItem, self.mutaplasmid)
copy.charge = self.charge
copy.state = self.state
copy.spoolType = self.spoolType
copy.spoolAmount = self.spoolAmount
copy.projectionRange = self.projectionRange
for x in self.mutators.values():
Mutator(copy, x.attribute, x.value)
return copy
def rebase(self, item):
state = self.state
charge = self.charge
spoolType = self.spoolType
spoolAmount = self.spoolAmount
projectionRange = self.projectionRange
Module.__init__(self, item, self.baseItem, self.mutaplasmid)
self.state = state
if self.isValidCharge(charge):
self.charge = charge
self.spoolType = spoolType
self.spoolAmount = spoolAmount
self.projectionRange = projectionRange
for x in self.mutators.values():
Mutator(self, x.attribute, x.value)
def __repr__(self):
if self.item:
return "Module(ID={}, name={}) at {}".format(
self.item.ID, self.item.name, hex(id(self))
)
else:
return "EmptyModule() at {}".format(hex(id(self)))
class Rack(Module):
"""
This is simply the Module class named something else to differentiate
it for app logic. The only thing interesting about it is the num property,
which is the number of slots for this rack
"""
num = None
| gpl-3.0 | 1,596,012,414,091,344,100 | 41.159526 | 155 | 0.613894 | false |
sixty-north/structurizr-python | docs/source/conf.py | 1 | 5152 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Structurizr Python documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 5 16:42:42 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath(os.path.join('..', '..')))
import structurizr
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'cartouche']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Structurizr Python'
copyright = '2017, Sixty North AS'
author = 'Sixty North AS'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = structurizr.__version__
# The full version, including alpha/beta/rc tags.
release = structurizr.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'StructurizrPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'StructurizrPython.tex', 'Structurizr Python Documentation',
'Sixty North AS', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'structurizrpython', 'Structurizr Python Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'StructurizrPython', 'Structurizr Python Documentation',
author, 'StructurizrPython', 'One line description of project.',
'Miscellaneous'),
]
| apache-2.0 | -4,845,932,692,731,908,000 | 29.666667 | 79 | 0.681289 | false |
ezarowny/url-condenser | url_condenser/url_condenser/settings.py | 1 | 3209 | """
Django settings for url_condenser project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+!6_uwpq6@ee)620m6f@lni3**fz5a8pjetd#)^e!t&hf#u&=k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'condensed_urls',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'url_condenser.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'url_condenser.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| mit | -3,015,443,904,954,819,600 | 25.303279 | 91 | 0.689 | false |
vsego/PyteArt | patgen.py | 1 | 2830 | #!/usr/bin/env python3
"""
Patterns generator for img2dat.py
"""
from itertools import combinations
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from string import printable, ascii_letters, punctuation
from sys import argv, stderr
class GenPatts:
allowedHoles = 0 # how many colors are permitted to remain unassigned
charSet = ascii_letters + punctuation # character set to be used for all or only first character
# (see reduceTo argument of asString() method)
colNum = 256 # the number of colors
font = "LiberationMono-Regular.ttf" # font file
maxChars = 3 # maximum number of overlapping characters
size = 31 # size of each character (bigger numbers produce finer images)
xy = (0, -size // 6) # where on the temporary image to put the character
skipChars = { "^", "#", "]", "/", "-" } # chars to be excluded (those with special meaning)
def asString(self, reduceTo = None):
"""
Generate patterns and return them as a list.
        Parameter reduceTo is a string that is used for all characters but the first.
If None, it is ignored and self.charSet is used for all characters (this might be a wee bit slow).
"""
patterns = [ "" for g in range(1, 256) ] + [ " " ]
left2do = self.colNum - 1
chars = 1
font = ImageFont.truetype(self.font, self.size)
imgSize = font.getsize('X')
charSet = set(self.charSet) - set(self.skipChars)
colFactor = 256 / self.colNum
while left2do > self.allowedHoles and chars <= self.maxChars:
for code in combinations(charSet, chars):
#if not self.skipChars.intersection(set(code)):
im = Image.new('L', imgSize, '#ffffff')
draw = ImageDraw.Draw(im)
for char in code:
draw.text(self.xy, char, '#000000', font)
hist = im.histogram()
col = round(sum([ hist[i] * i for i in range(len(hist)) ]) / sum(hist))
col = min(max(0, round(colFactor * round(col / colFactor))), 255)
if patterns[col] == "":
patterns[col] = code
left2do -= 1
if left2do <= 0:
break
chars += 1
if reduceTo and chars == 2:
charSet = set(reduceTo) - set(self.skipChars)
return patterns
def toFile(self, fname, reduceTo = None):
with open(fname, "w", encoding = "utf-8") as f:
            f.write(str(self.asString(reduceTo)))
if __name__ == "__main__":
fname = (argv[1] if len(argv) > 1 else "img2dat.pat")
gp = GenPatts()
gp.toFile(fname, reduceTo = ascii_letters)
stderr.write("Patterns created and saved to \"" + fname + "\".\n")
| gpl-2.0 | -1,986,131,092,147,713,800 | 39.428571 | 106 | 0.576325 | false |
theindependentwolf/liverpool-discord-bot | result_details.py | 1 | 4915 | import discord
from random import randint
import random
import urllib.request
from html.parser import HTMLParser
from bs4 import BeautifulSoup
import nocontext
import requests
import datetime
import config
import asyncio
import csv
import time
import dateutil.relativedelta as relativetd
def next_game():
"""
Count down to the next game
"""
current_time = datetime.datetime.now()
opponent_details = get_opponent_details()
if opponent_details:
oppo_time = opponent_details[0]
opponent_time = datetime.datetime(int(oppo_time[:4]), int(oppo_time[4:6]), int(oppo_time[6:8]), int(oppo_time[9:11]) - 1, int(oppo_time[11:13]))
countdown = relativetd.relativedelta(opponent_time, current_time)
countdown_readable = "{} day(s) {} hours {} minutes {} seconds".format(countdown.days, countdown.hours, countdown.minutes, countdown.seconds)
return "```{}\n{}```".format(countdown_readable, opponent_details[1])
else:
return "```No fixtures found in the calendar```"
def get_opponent_details():
"""
Return opponent details
"""
todays_date = time.strftime("%Y%m%d")
opponent_details = ""
with open('fixtures.csv','rt') as csvfile:
content = csv.reader(csvfile, delimiter = ',')
for row in content:
date = row[0]
summary = row[1]
if date[:8] >= todays_date:
return row
def get_readable_time(input_date):
"""
    Convert an iCal-style timestamp (e.g. yyyymmddThhmmssZ) into readable time
"""
weekchart = {0:"Monday", 1:"Tuesday", 2:"Wednesday", 3:"Thursday", 4:"Friday",5:"Saturday", 6:"Sunday"}
readable_time = ""
separator_slash = "/"
separator_colon = ":"
space = " "
year = input_date[:4]
month = input_date[4:6]
date = input_date[6:8]
hour = input_date[9:11]
minute = input_date[11:13]
day = datetime.datetime(int(year), int(month), int(date), 0, 0, 0, 0).weekday()
return ('{:9s} {}/{}/{} {}:{}'.format(weekchart.get(day), month, date, year, hour, minute))
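# Illustrative example: get_readable_time("20170305T190000Z") would return
# "Sunday    03/05/2017 19:00" (the weekday name is padded to a width of 9 characters).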
def get_fixtures():
"""
    Gets the next config.number_of_fixtures upcoming fixtures, ordered by date
"""
printable_string ="```"
todays_date = time.strftime("%Y%m%d")
count = 1
with open('fixtures.csv','rt') as csvfile:
content = csv.reader(csvfile, delimiter=',')
for row in content:
date = row[0]
summary = row[1]
if date[:8] > todays_date:
printable_string += get_readable_time(date) + " " + get_home_away(summary) + " " + summary.replace("Liverpool","").replace(" v ","").strip() + "\n"
                if count == config.number_of_fixtures:
                    break
                count = count + 1
    # close the ``` block even when fewer fixtures than requested were found
    printable_string += "```"
    return printable_string
def get_home_away(summary):
"""
Tells if it's a home or an away fixture
"""
if summary.startswith('Liverpool'):
return "home"
else:
return "away"
def ten_games(*team):
"""
    Get the results of the last 10 games for a given EPL team from the BBC website
"""
if not team:
team = "Liverpool"
else:
team = team[0]
url = "http://www.bbc.com/sport/football/premier-league/table"
html = urllib.request.urlopen(url).read()
bs = BeautifulSoup(html, "html.parser")
tables = bs.findChildren('table')
my_table = tables[0]
rows = my_table.findChildren(['tr'])
printable_results = "```"
for row in rows:
if row.find('ol'):
team_name = row.find('td', class_="team-name")
if team.lower() in team_name.string.lower():
ten_games = row.find('ol').findChildren(['li'])
for game in ten_games:
printable_results += game.get('title') + "\n"
printable_results += "```"
print(printable_results)
# return printable_results
def team_form():
"""
    Get the recent form (last 10 results) of every EPL team from the BBC website
"""
url = "http://www.bbc.com/sport/football/premier-league/table"
html = urllib.request.urlopen(url).read()
bs = BeautifulSoup(html, "html.parser")
tables = bs.findChildren('table')
my_table = tables[0]
rows = my_table.findChildren(['tr'])
position = 1
printable_form = "```"
for row in rows:
if row.find('ol'):
team_name = row.find('td', class_="team-name")
print(team_name)
ten_games = row.find('ol').findChildren(['li'])
printable_form += str(position).rjust(3) + " " + str(team_name.text.ljust(23))
for game in ten_games:
printable_form += game.string[0] + " "
printable_form += "\n"
position = position + 1
printable_form += "```"
# return printable_form
print(printable_form)
| mit | 744,983,116,459,146,000 | 29.71875 | 165 | 0.580671 | false |
richardcornish/smsweather | fabfile.py | 1 | 2185 | from fabric import task
from django.utils.termcolors import colorize
# 1. Local: ssh-add ~/.ssh/aws.pem
# 2. Local: Edit hosts, repo_name, pythonpath (if necessary)
# 3. Remote: Copy .env to to {code_dir}/.env:
hosts = [{
'host': 'ec2-3-89-247-193.compute-1.amazonaws.com',
'user': 'ubuntu',
}]
repo_name = 'emojiweather'
pythonpath = repo_name
service_name = repo_name
code_dir = f'/home/ubuntu/{repo_name}'
@task
def update(c):
print(colorize('\nUpdating code...', fg='white'))
c.run(f'cd {code_dir} && git pull origin master')
@task
def install(c):
print(colorize('\nInstalling dependencies...', fg='white'))
c.run(f'cd {code_dir} && source env/bin/activate && pip install -r requirements.txt')
@task
def migrate(c):
print(colorize('\nMigrating database...', fg='white'))
c.inline_ssh_env = True
c.run(f'source {code_dir}/.env && cd {code_dir} && source env/bin/activate && python {pythonpath}/manage.py migrate --noinput', env={'DEBUG': '$DEBUG', 'DATABASE_PASSWORD': '$DATABASE_PASSWORD'})
@task
def collect(c):
print(colorize('\nCopying static files...', fg='white'))
c.run(f'cd {code_dir} && source env/bin/activate && python {pythonpath}/manage.py collectstatic --noinput')
@task
def clear(c):
print(colorize('\nDeleting sessions...', fg='white'))
c.inline_ssh_env = True
c.run(f'source {code_dir}/.env && cd {code_dir} && source env/bin/activate && python {pythonpath}/manage.py clearsessions', env={'DEBUG': '$DEBUG', 'DATABASE_PASSWORD': '$DATABASE_PASSWORD'})
@task
def restart(c):
print(colorize('\nRestarting web server...\n', fg='white'))
c.run(f'sudo systemctl restart {service_name}')
c.run(f'sudo systemctl status {service_name}')
print('')
c.run('sudo systemctl restart nginx')
c.run('sudo systemctl status nginx')
@task(hosts=hosts)
def deploy(c):
print(colorize('\nStarting deploy... 👌', fg='green'))
try:
update(c)
install(c)
migrate(c)
collect(c)
# clear(c)
restart(c)
print(colorize('\nDeploy succeeded 🎉', fg='green'))
    except Exception:
print(colorize('\nDeploy failed ❌', fg='red'))
| bsd-3-clause | 7,770,267,891,711,363,000 | 26.910256 | 199 | 0.637115 | false |
Baymaxteam/SmartHomeDjango | config/settings/common.py | 1 | 9075 | # -*- coding: utf-8 -*-
"""
Django settings for SmartHome project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
from datetime import timedelta
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('SmartHome')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'rest_framework', # Rest_framework
'corsheaders'
)
# Apps specific for this project go here.
LOCAL_APPS = (
'SmartHome.users', # custom users app
# Your stuff: custom apps go here
'SmartHome.api', #Rest api
'SmartHome.node', #node management
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'SmartHome.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""SamKuo""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres://django:[email protected]:5432/smarthome"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Taipei'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ('SmartHome.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
#BROKER_URL = env("CELERY_BROKER_URL", default='django://')
BROKER_URL = env("CELERY_BROKER_URL", default='redis://localhost:6379/0')
RESULT_BACKEND = env("CELERY_RESULT_BACKEND", default = 'redis://localhost:6379/0')
CELERY_TIMEZONE = 'Asia/Taipei'
CELERYBEAT_SCHEDULE = {
'Periodic-every-2-seconds': {
'task': 'SmartHome.node.tasks.nodeCurrentRepo',
'schedule': timedelta(seconds=2),
}
}
########## END CELERY
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
| bsd-3-clause | 7,252,114,172,556,954,000 | 34.728346 | 114 | 0.615427 | false |
CDE-UNIBE/qcat | apps/sample/tests/test_search.py | 1 | 9992 | # Prevent logging of Elasticsearch queries
import logging
import pytest
logging.disable(logging.CRITICAL)
import collections
from django.db.models import Q
from qcat.tests import TestCase
from questionnaire.models import Questionnaire
from questionnaire.utils import get_list_values
from search.search import advanced_search
from search.tests.test_index import create_temp_indices
FilterParam = collections.namedtuple(
'FilterParam',
['questiongroup', 'key', 'values', 'operator', 'type'])
@pytest.mark.usefixtures('es')
class AdvancedSearchTest(TestCase):
fixtures = [
'global_key_values',
'sample',
'samplemulti',
'sample_questionnaires_search',
]
def setUp(self):
create_temp_indices([('sample', '2015'), ('samplemulti', '2015')])
def test_advanced_search(self):
filter_param = FilterParam(
questiongroup='qg_11', key='key_14', values=['value_14_1'],
operator='eq', type='image_checkbox')
key_search = advanced_search(
filter_params=[filter_param],
configuration_codes=['sample']).get('hits')
self.assertEqual(key_search.get('total'), 2)
filter_param = FilterParam(
questiongroup='qg_11', key='key_14', values=['value_14_2'],
operator='eq', type='image_checkbox')
key_search = advanced_search(
filter_params=[filter_param],
configuration_codes=['sample']).get('hits')
self.assertEqual(key_search.get('total'), 1)
def test_advanced_search_single_filter(self):
filter_param = FilterParam(
questiongroup='qg_11', key='key_14', values=['value_14_1'],
operator='eq', type='image_checkbox')
search = advanced_search(
filter_params=[filter_param], configuration_codes=['sample']
).get('hits')
self.assertEqual(search.get('total'), 2)
def test_advanced_search_multiple_arguments(self):
query_string = 'key'
filter_param = FilterParam(
questiongroup='qg_35', key='key_48', values=['value_1'],
operator='eq', type='radio')
search = advanced_search(
filter_params=[filter_param],
query_string=query_string,
configuration_codes=['sample']
).get('hits')
self.assertEqual(search.get('total'), 1)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['1'])
def test_advanced_search_multiple_arguments_match_one(self):
query_string = 'key'
filter_param = FilterParam(
questiongroup='qg_35', key='key_48', values=['value_1'],
operator='eq', type='radio')
search = advanced_search(
filter_params=[filter_param],
query_string=query_string,
configuration_codes=['sample'],
match_all=False
).get('hits')
self.assertEqual(search.get('total'), 2)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['2', '1'])
def test_advanced_search_multiple_arguments_2_match_one(self):
query_string = 'key'
filter_param = FilterParam(
questiongroup='qg_11', key='key_14', values=['value_14_1'],
operator='eq', type='image_checkbox')
search = advanced_search(
filter_params=[filter_param],
query_string=query_string,
configuration_codes=['sample'],
match_all=False
).get('hits')
self.assertEqual(search.get('total'), 3)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['2', '1', '5'])
def test_advanced_search_multiple_arguments_2(self):
query_string = 'key'
filter_param = FilterParam(
questiongroup='qg_11', key='key_14', values=['value_14_1'],
operator='eq', type='image_checkbox')
search = advanced_search(
filter_params=[filter_param],
query_string=query_string,
configuration_codes=['sample']
).get('hits')
self.assertEqual(search.get('total'), 1)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['1'])
def test_advanced_search_multiple_arguments_same_filter(self):
filter_param = FilterParam(
questiongroup='qg_11', key='key_14',
values=['value_14_1', 'value_14_3'],
operator='eq', type='image_checkbox')
search = advanced_search(
filter_params=[filter_param],
configuration_codes=['sample']
).get('hits')
self.assertEqual(search.get('total'), 3)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['1', '5', '4'])
def test_advanced_search_multiple_arguments_same_filter_2(self):
filter_param_1 = FilterParam(
questiongroup='qg_11', key='key_14',
values=['value_14_1', 'value_14_3'],
operator='eq', type='image_checkbox')
filter_param_2 = FilterParam(
questiongroup='qg_35', key='key_48', values=['value_3'],
operator='eq', type='radio')
search = advanced_search(
filter_params=[filter_param_1, filter_param_2],
configuration_codes=['sample']
).get('hits')
self.assertEqual(search.get('total'), 1)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['4'])
def test_advanced_search_multiple_arguments_same_filter_2_match_one(self):
filter_param_1 = FilterParam(
questiongroup='qg_11', key='key_14',
values=['value_14_1', 'value_14_3'],
operator='eq', type='image_checkbox')
filter_param_2 = FilterParam(
questiongroup='qg_35', key='key_48', values=['value_2'],
operator='eq', type='radio')
search = advanced_search(
filter_params=[filter_param_1, filter_param_2],
configuration_codes=['sample'],
match_all=False,
).get('hits')
self.assertEqual(search.get('total'), 4)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertListEqual(hit_ids, ['1', '2', '5', '4'])
def test_advanced_search_gte(self):
filter_param = FilterParam(
questiongroup='qg_11', key='key_14', values=['2'],
operator='gte', type='image_checkbox')
with self.assertRaises(NotImplementedError):
search = advanced_search(
filter_params=[filter_param],
configuration_codes=['sample']
).get('hits')
self.assertEqual(search.get('total'), 2)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['4', '1'])
def test_advanced_search_lt(self):
filter_param = FilterParam(
questiongroup='qg_11', key='key_14', values=['2'],
operator='lt', type='image_checkbox')
with self.assertRaises(NotImplementedError):
search = advanced_search(
filter_params=[filter_param],
configuration_codes=['sample']
).get('hits')
self.assertEqual(search.get('total'), 2)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['5', '1'])
def test_advanced_search_lte(self):
filter_param = FilterParam(
questiongroup='qg_35', key='key_48', values=['2'],
operator='lte', type='radio')
with self.assertRaises(NotImplementedError):
search = advanced_search(
filter_params=[filter_param],
configuration_codes=['sample']
).get('hits')
self.assertEqual(search.get('total'), 2)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['2', '1'])
def test_advanced_search_gte_lte(self):
filter_param_1 = FilterParam(
questiongroup='qg_11', key='key_14', values=['1'],
operator='lte', type='image_checkbox')
filter_param_2 = FilterParam(
questiongroup='qg_11', key='key_14', values=['3'],
operator='gte', type='image_checkbox')
with self.assertRaises(NotImplementedError):
search = advanced_search(
filter_params=[filter_param_1, filter_param_2],
configuration_codes=['sample'],
match_all=False,
).get('hits')
self.assertEqual(search.get('total'), 3)
hit_ids = [r.get('_id') for r in search.get('hits')]
self.assertEqual(hit_ids, ['5', '4', '1'])
@pytest.mark.usefixtures('es')
class GetListValuesTest(TestCase):
fixtures = [
'global_key_values',
'sample',
'samplemulti',
'sample_questionnaires_search',
]
def setUp(self):
create_temp_indices([('sample', '2015'), ('samplemulti', '2015')])
def test_returns_same_result_for_es_search_and_db_objects(self):
es_hits = advanced_search(
filter_params=[], query_string='key',
configuration_codes=['sample'])
res_1 = get_list_values(
configuration_code='sample', es_hits=es_hits.get(
'hits', {}).get('hits', []))
ids = [q.get('id') for q in res_1]
res_2 = get_list_values(
configuration_code='sample',
questionnaire_objects=Questionnaire.objects.filter(pk__in=ids),
status_filter=Q())
for res in [res_1, res_2]:
for r in res:
self.assertEqual(r.get('configuration'), 'sample')
self.assertIn('key_1', r)
self.assertIn('key_5', r)
self.assertIn('created', r)
self.assertIn('updated', r)
| apache-2.0 | -4,025,634,056,334,303,000 | 38.184314 | 78 | 0.563651 | false |
espenhgn/hybridLFPy | examples/example_microcircuit_params_lognormalweights.py | 1 | 38825 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Modified parameters file for the Hybrid LFP scheme, applying the methodology
with the model of:
Potjans, T. and Diesmann, M. "The Cell-Type Specific Cortical Microcircuit:
Relating Structure and Activity in a Full-Scale Spiking Network Model".
Cereb. Cortex (2014) 24 (3): 785-806.
doi: 10.1093/cercor/bhs358
'''
import numpy as np
import os
import json
from mpi4py import MPI # this is needed to initialize other classes correctly
import multiprocessing as mp # to facilitate OpenMP parallelization w. NEST
# if MPI.SIZE == 1
###################################
# Initialization of MPI stuff #
###################################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
####################################
# HELPER FUNCTIONS #
####################################
def flattenlist(lst): return sum(sum(lst, []), [])
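# example: flattenlist([[[1, 2], [3]], [[4]]]) returns [1, 2, 3, 4]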
####################################
# SPATIAL CONNECTIVITY EXTRACTION #
####################################
'''
Include functions that extract information from binzegger.json here
'''
def get_F_y(fname='binzegger_connectivity_table.json', y=['p23']):
'''
Extract frequency of occurrences of those cell types that are modeled.
The data set contains cell types that are not modeled (TCs etc.)
The returned percentages are renormalized onto modeled cell-types,
i.e. they sum up to 1
'''
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
occurr = []
for cell_type in y:
occurr += [data['data'][cell_type]['occurrence']]
return list(np.array(occurr) / np.sum(occurr))
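# Example (illustrative): get_F_y(y=['p23', 'b23', 'nb23']) returns the relative
# occurrence frequencies of just those three cell types, renormalized to sum to 1.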
def get_L_yXL(fname, y, x_in_X, L):
'''
compute the layer specificity, defined as:
::
L_yXL = k_yXL / k_yX
'''
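    # In words: L_yXL[y][L, X] is (under the assumptions of this scheme) the
    # fraction of the synapses that postsynaptic cell type y receives from
    # presynaptic population X which are located in layer L, so each column of
    # L_yXL[y] sums to 1 (or is all zero if X does not connect to y).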
def _get_L_yXL_per_yXL(fname, x_in_X, X_index,
y, layer):
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
# Get number of synapses
if layer in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
# init variables
k_yXL = 0
k_yX = 0
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][layer][x] / 100.
k_yL = data['data'][y]['syn_dict'][layer]['number of synapses per neuron']
k_yXL += p_yxL * k_yL
for l in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][l][x] / 100.
k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron']
k_yX += p_yxL * k_yL
if k_yXL != 0.:
return k_yXL / k_yX
else:
return 0.
else:
return 0.
# init dict
L_yXL = {}
# iterate over postsynaptic cell types
for y_value in y:
# container
data = np.zeros((len(L), len(x_in_X)))
# iterate over lamina
for i, Li in enumerate(L):
# iterate over presynapse population inds
for j in range(len(x_in_X)):
data[i][j] = _get_L_yXL_per_yXL(fname, x_in_X,
X_index=j,
y=y_value,
layer=Li)
L_yXL[y_value] = data
return L_yXL
def get_T_yX(fname, y, y_in_Y, x_in_X, F_y):
'''
compute the cell type specificity, defined as:
::
T_yX = K_yX / K_YX
= F_y * k_yX / sum_y(F_y*k_yX)
'''
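    # In words: T_yX[y, X] is the fraction of population X's synapses onto
    # population Y that land on member cell type y, with each member cell type
    # weighted by its relative occurrence F_y.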
def _get_k_yX_mul_F_y(y, y_index, X_index):
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
# init variables
k_yX = 0.
for l in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][l][x] / 100.
k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron']
k_yX += p_yxL * k_yL
return k_yX * F_y[y_index]
# container
T_yX = np.zeros((len(y), len(x_in_X)))
# iterate over postsynaptic cell types
for i, y_value in enumerate(y):
# iterate over presynapse population inds
for j in range(len(x_in_X)):
k_yX_mul_F_y = 0
for k, yy in enumerate(sum(y_in_Y, [])):
if y_value in yy:
for yy_value in yy:
ii = np.where(np.array(y) == yy_value)[0][0]
k_yX_mul_F_y += _get_k_yX_mul_F_y(yy_value, ii, j)
if k_yX_mul_F_y != 0:
T_yX[i, j] = _get_k_yX_mul_F_y(y_value, i, j) / k_yX_mul_F_y
return T_yX
class general_params(object):
'''class defining general model parameters'''
def __init__(self):
'''class defining general model parameters'''
####################################
# REASON FOR THIS SIMULATION #
####################################
self.reason = 'Default Potjans model with spontaneous activity'
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
####################################
# MAIN SIMULATION CONTROL #
####################################
# simulation step size
self.dt = 0.1
# simulation start
self.tstart = 0
# simulation stop
self.tstop = 1200
####################################
# OUTPUT LOCATIONS #
####################################
# folder for all simulation output and scripts
# using the cluster's dedicated SCRATCH area
if 'SCRATCH' in os.environ and os.path.isdir(
os.path.join(os.environ['SCRATCH'], os.environ['USER'])):
self.savefolder = os.path.join(
os.environ['SCRATCH'],
os.environ['USER'],
'hybrid_model',
                'simulation_output_example_microcircuit_lognormalweights')
# LOCALLY
else:
            self.savefolder = 'simulation_output_example_microcircuit_lognormalweights'
# folder for simulation scripts
self.sim_scripts_path = os.path.join(self.savefolder, 'sim_scripts')
# folder for each individual cell's output
self.cells_path = os.path.join(self.savefolder, 'cells')
# folder for figures
self.figures_path = os.path.join(self.savefolder, 'figures')
# folder for population resolved output signals
self.populations_path = os.path.join(self.savefolder, 'populations')
# folder for raw nest output files
self.raw_nest_output_path = os.path.join(self.savefolder,
'raw_nest_output')
# folder for processed nest output files
self.spike_output_path = os.path.join(self.savefolder,
'processed_nest_output')
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# POPULATIONS #
####################################
# Number of populations
self.Npops = 9
# number of neurons in each population (unscaled)
self.full_scale_num_neurons = [[20683, # layer 23 e
5834], # layer 23 i
[21915, # layer 4 e
5479], # layer 4 i
[4850, # layer 5 e
1065], # layer 5 i
[14395, # layer 6 e
2948]] # layer 6 i
# Number of thalamic neurons/ point processes
self.n_thal = 902
# population names TODO: rename
self.X = [
'TC',
'L23E',
'L23I',
'L4E',
'L4I',
'L5E',
'L5I',
'L6E',
'L6I']
self.Y = self.X[1:]
# TC and cortical population sizes in one list TODO: rename
self.N_X = np.array([self.n_thal] +
flattenlist([self.full_scale_num_neurons]))
####################################
# CONNECTIVITY #
####################################
# intra-cortical connection probabilities between populations
# 23e 23i 4e 4i 5e 5i 6e 6i
self.conn_probs = np.array([[0.1009, 0.1689, 0.0437, 0.0818,
0.0323, 0., 0.0076, 0.], # 23e
[0.1346, 0.1371, 0.0316, 0.0515,
0.0755, 0., 0.0042, 0.], # 23i
[0.0077, 0.0059, 0.0497, 0.135,
0.0067, 0.0003, 0.0453, 0.], # 4e
[0.0691, 0.0029, 0.0794, 0.1597,
0.0033, 0., 0.1057, 0.], # 4i
[0.1004, 0.0622, 0.0505, 0.0057,
0.0831, 0.3726, 0.0204, 0.], # 5e
[0.0548, 0.0269, 0.0257, 0.0022,
0.06, 0.3158, 0.0086, 0.], # 5i
[0.0156, 0.0066, 0.0211, 0.0166, 0.0572,
0.0197, 0.0396, 0.2252], # 6e
[0.0364, 0.001, 0.0034, 0.0005,
0.0277, 0.008, 0.0658, 0.1443]]) # 6i
self.conn_probs *= 1.0
# connection probabilities for thalamic input
self.C_th = [[0.0, # layer 23 e
0.0], # layer 23 i
[0.0983, # layer 4 e
0.0619], # layer 4 i
[0.0, # layer 5 e
0.0], # layer 5 i
[0.0512, # layer 6 e
0.0196]] # layer 6 i
# full connection probabilities including TC connections
self.C_YX = np.c_[flattenlist([self.C_th]), self.conn_probs]
####################################
# CONNECTION PROPERTIES #
####################################
# mean EPSP amplitude (mV) for all connections except L4e->L23e
self.PSP_e = 0.15
# mean EPSP amplitude (mv) for L4e->L23e connections
# FIX POLISH NOTATION !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
self.PSP_23e_4e = self.PSP_e * 2
# standard deviation of PSC amplitudes relative to mean PSC amplitudes
# this is sigma/mu in probability distribution
# Gaussian (lognormal_weights = False): mu is mean, sigma is standard deviation
        # Lognormal (lognormal_weights = True): mean and stdev can be
# calculated from mu and sigma
self.PSC_rel_sd = 3.0
# IPSP amplitude relative to EPSP amplitude
self.g = -4.
# set L4i ->L4e stronger in order to get rid of 84 Hz peak
self.g_4e_4i = self.g * 1.15
# Whether to use lognormal weights or not
self.lognormal_weights = True
# mean dendritic delays for excitatory and inhibitory transmission (ms)
self.delays = [1.5, 0.75]
# standard deviation relative to mean delays
self.delay_rel_sd = 0.5
####################################
# CELL-TYPE PARAMETERS #
####################################
# Note that these parameters are only relevant for the point-neuron network in case
# one wants to calculate depth-resolved cell-type specific input
# currents
# point to .json connectivity table file
self.connectivity_table = 'binzegger_connectivity_table.json'
# list of cell type names used in this script
# names of every post-syn pop layer
self.y_in_Y = [
[['p23'], ['b23', 'nb23']],
[['p4', 'ss4(L23)', 'ss4(L4)'], ['b4', 'nb4']],
[['p5(L23)', 'p5(L56)'], ['b5', 'nb5']],
[['p6(L4)', 'p6(L56)'], ['b6', 'nb6']]]
self.y = flattenlist(self.y_in_Y)
# need presynaptic cell type to population mapping
self.x_in_X = [['TCs', 'TCn']] + sum(self.y_in_Y, [])
# map the pre-synaptic populations to the post-syn populations
self.mapping_Yy = list(zip(
['L23E', 'L23I', 'L23I',
'L4E', 'L4E', 'L4E', 'L4I', 'L4I',
'L5E', 'L5E', 'L5I', 'L5I',
'L6E', 'L6E', 'L6I', 'L6I'],
self.y))
# Frequency of occurrence of each cell type (F_y); 1-d array
self.F_y = get_F_y(fname=self.connectivity_table, y=self.y)
# Relative frequency of occurrence of each cell type within its
# population (F_{y,Y})
self.F_yY = [[get_F_y(fname=self.connectivity_table, y=y)
for y in Y] for Y in self.y_in_Y]
# Number of neurons of each cell type (N_y); 1-d array
self.N_y = np.array([self.full_scale_num_neurons[layer][pop] * self.F_yY[layer][pop][k]
for layer, array in enumerate(self.y_in_Y)
for pop, cell_types in enumerate(array)
for k, _ in enumerate(cell_types)]).astype(int)
# compute the number of synapses as in Potjans&Diesmann 2012
K_YX = np.zeros(self.C_YX.shape)
for i in range(K_YX.shape[1]):
K_YX[:, i] = (np.log(1. - self.C_YX[:, i]) /
np.log(1. - 1. / (self.N_X[1:] *
self.N_X[i])))
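        # i.e. K_YX = ln(1 - C_YX) / ln(1 - 1/(N_Y * N_X)), the expected total
        # number of synapses reproducing connection probability C_YX when
        # connections are drawn randomly with replacement (Potjans & Diesmann 2012)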
# spatial connection probabilites on each subpopulation
# Each key must correspond to a subpopulation like 'L23E' used everywhere else,
# each array maps thalamic and intracortical connections.
# First column is thalamic connections, and the rest intracortical,
# ordered like 'L23E', 'L23I' etc., first row is normalised probability of
# connection withing L1, L2, etc.;
self.L_yXL = get_L_yXL(fname=self.connectivity_table,
y=self.y,
x_in_X=self.x_in_X,
L=['1', '23', '4', '5', '6'])
# compute the cell type specificity
self.T_yX = get_T_yX(fname=self.connectivity_table, y=self.y,
y_in_Y=self.y_in_Y, x_in_X=self.x_in_X,
F_y=self.F_y)
Y, y = list(zip(*self.mapping_Yy))
# assess relative distribution of synapses for a given celltype
self.K_yXL = {}
#self.T_yX = {}
for i, (Y, y) in enumerate(self.mapping_Yy):
# fill in K_yXL (layer specific connectivity)
self.K_yXL[y] = (self.T_yX[i, ] *
K_YX[np.array(self.Y) == Y, ] *
self.L_yXL[y]).astype(int)
# number of incoming connections per cell type per layer per cell
self.k_yXL = {}
for y, N_y in zip(self.y, self.N_y):
self.k_yXL.update({y: (1. * self.K_yXL[y]).astype(int) // N_y})
# calculate corresponding connectivity to K_yXL
self.C_yXL = {}
for y, N_y in zip(self.y, self.N_y):
self.C_yXL.update(
{y: 1. - (1. - 1. / (N_y * self.N_X))**self.K_yXL[y]})
##########################################################################
class point_neuron_network_params(general_params):
'''class point-neuron network parameters'''
def __init__(self):
'''class point-neuron network parameters'''
# inherit general params
general_params.__init__(self)
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
# use same number of threads as MPI COMM.size() for parallel jobs
# else the number of processors for serial jobs
if SIZE > 1:
self.total_num_virtual_procs = SIZE
else:
self.total_num_virtual_procs = mp.cpu_count()
####################################
# RNG PROPERTIES #
####################################
# offset for RNGs
self.seed_offset = 45
####################################
# RECORDING PARAMETERS #
####################################
self.overwrite_existing_files = True
# recording can either be done from a fraction of neurons in each
# population or from a fixed number
# whether to record spikes from a fixed fraction of neurons in each
# population.
self.record_fraction_neurons_spikes = True
if self.record_fraction_neurons_spikes:
self.frac_rec_spikes = 1.
else:
self.n_rec_spikes = 100
# whether to record membrane potentials from a fixed fraction of
# neurons in each population
self.record_fraction_neurons_voltage = False
if self.record_fraction_neurons_voltage:
self.frac_rec_voltage = 0.1
else:
self.n_rec_voltage = 0
# whether to record weighted input spikes from a fixed fraction of
# neurons in each population
self.record_fraction_neurons_input_spikes = False
if self.record_fraction_neurons_input_spikes:
self.frac_rec_input_spikes = 0.1
else:
self.n_rec_input_spikes = 0
# number of recorded neurons for depth resolved input currents
self.n_rec_depth_resolved_input = 0
# NESTio recording format
self.record_to = 'ascii'
# whether to record thalamic spikes
self.record_thalamic_spikes = True
# global ID file name
self.GID_filename = 'population_GIDs.dat'
# readout global ID file name
self.readout_GID_filename = 'readout_GIDs.dat'
# stem for spike detector file labels
self.spike_recorder_label = 'spikes_'
# stem for voltmeter file labels
self.voltmeter_label = 'voltages_'
# stem for thalamic spike detector file labels
self.th_spike_recorder_label = 'spikes_0'
# stem for in-degree file labels
self.in_degree_label = 'in_degrees_'
# stem for file labels for in-degree from thalamus
self.th_in_degree_label = 'in_degrees_th_'
# stem for weighted input spikes labels
self.weighted_input_spikes_label = 'weighted_input_spikes_'
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# SCALING #
####################################
# scaling parameter for population sizes
self.area = 1.0
# preserve indegrees when downscaling
self.preserve_K = False
####################################
# SINGLE NEURON PARAMS #
####################################
# neuron model
self.neuron_model = '/iaf_psc_exp'
# mean of initial membrane potential (mV)
self.Vm0_mean = -58.0
# std of initial membrane potential (mV)
self.Vm0_std = 10.0
# mean of threshold potential (mV)
self.V_th_mean = -50.
# std of threshold potential (mV)
self.V_th_std = 1E-8 # nest::NormalParameter: std > 0 required.
self.model_params = {'tau_m': 10., # membrane time constant (ms)
# excitatory synaptic time constant (ms)
'tau_syn_ex': 0.5,
# inhibitory synaptic time constant (ms)
'tau_syn_in': 0.5,
# absolute refractory period (ms)
't_ref': 2.,
# resting membrane potential (mV)
'E_L': -65.,
'V_th': self.V_th_mean, # spike threshold (mV)
'C_m': 250., # membrane capacitance (pF)
'V_reset': -65. # reset potential (mV)
}
####################################
# EXTERNAL INPUTS #
####################################
# number of external inputs (Potjans-Diesmann model 2012)
self.K_bg = [[1600, # layer 23 e
1500], # layer 23 i
[2100, # layer 4 e
1900], # layer 4 i
[2000, # layer 5 e
1900], # layer 5 i
[2900, # layer 6 e
2100]] # layer 6 i
        # rate of Poisson input at each external input synapse (spikes/s)
self.bg_rate = 0.
# rate of equivalent input used for DC amplitude calculation,
# set to zero if self.bg_rate > 0.
self.bg_rate_dc = 8.
# DC amplitude at each external input synapse (pA)
# to each neuron via 'dc_amplitude = tau_syn_ex/1000*bg_rate*PSC_ext'
self.dc_amplitude = self.model_params["tau_syn_ex"] * \
self.bg_rate_dc * self._compute_J()
# mean EPSP amplitude (mV) for thalamic and non-thalamic external input
# spikes
self.PSP_ext = 0.15
# mean delay of thalamic input (ms)
self.delay_th = 1.5
# standard deviation relative to mean delay of thalamic input
self.delay_th_rel_sd = 0.5
####################################
# THALAMIC INPUT VERSIONS #
####################################
# off-option for start of thalamic input versions
self.off = 100. * self.tstop
# poisson_generator (pure Poisson input)
self.th_poisson_start = self.off # onset (ms)
self.th_poisson_duration = 10. # duration (ms)
        self.th_poisson_rate = 120.  # rate (spikes/s)
# spike_generator
# Note: This can be used with a large Gaussian delay distribution in order to mimic a
# Gaussian pulse packet which is different for each thalamic neuron
self.th_spike_times = [self.off] # time of the thalamic pulses (ms)
# create n_thal spike_generator nodes, one connected to each respective
# postsynaptic parrot_neuron. The expected format is a list of length
# self.n_thal containing lists of activation times.
# Turn activation off by setting it to [[] for i in range(self.n_thal)]
self.th_spike_generator_times = [[] for i in range(self.n_thal)]
# sinusoidal_poisson_generator (oscillatory Poisson input)
self.th_sin_start = self.off # onset (ms)
self.th_sin_duration = 5000. # duration (ms)
self.th_sin_mean_rate = 30. # mean rate (spikes/s)
# rate modulation amplitude (spikes/s)
self.th_sin_fluc_rate = 30.
# frequency of the rate modulation (Hz)
self.th_sin_freq = 15.
# phase of rate modulation (deg)
self.th_sin_phase = 0.
# Gaussian_pulse_packages
self.th_gauss_times = [self.off] # package center times
self.th_gauss_num_spikes_per_packet = 1 # number of spikes per packet
self.th_gauss_sd = 5. # std of Gaussian pulse packet (ms)
####################################
# SPATIAL ORGANIZATION #
####################################
# needed for spatially resolved input currents
# number of layers TODO: find a better solution for that
self.num_input_layers = 5
def _compute_J(self):
'''
Compute the current amplitude corresponding to the exponential
synapse model PSP amplitude
Derivation using sympy:
::
from sympy import *
#define symbols
t, tm, Cm, ts, Is, Vmax = symbols('t tm Cm ts Is Vmax')
#assume zero delay, t >= 0
#using eq. 8.10 in Sterrat et al
V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm
print('V = %s' % V)
#find time of V == Vmax
dVdt = diff(V, t)
print('dVdt = %s' % dVdt)
[t] = solve(dVdt, t)
print('t(t@dVdT==Vmax) = %s' % t)
#solve for Is at time of maxima
V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm
print('V(%s) = %s' % (t, V))
[Is] = solve(V-Vmax, Is)
print('Is = %s' % Is)
resulting in:
::
Cm*Vmax*(-tm + ts)/(tm*ts*(exp(tm*log(ts/tm)/(tm - ts))
- exp(ts*log(ts/tm)/(tm - ts))))
'''
# LIF params
tm = self.model_params['tau_m']
Cm = self.model_params['C_m']
# synapse
ts = self.model_params['tau_syn_ex']
Vmax = self.PSP_e
# max current amplitude
J = Cm * Vmax * (-tm + ts) / (tm * ts * (np.exp(tm * np.log(ts / tm) /
(tm - ts)) - np.exp(ts * np.log(ts / tm) / (tm - ts))))
# unit conversion pF*mV -> nA
J *= 1E-3
return J
class multicompartment_params(point_neuron_network_params):
'''
Inherited class defining additional attributes needed by e.g., the
classes population.Population and population.DummyNetwork
This class does not take any kwargs
'''
def __init__(self):
'''
Inherited class defining additional attributes needed by e.g., the
classes population.Population and population.DummyNetwork
This class does not take any kwargs
'''
# initialize parent classes
point_neuron_network_params.__init__(self)
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
#######################################
# PARAMETERS FOR LOADING NEST RESULTS #
#######################################
# parameters for class population.DummyNetwork class
self.networkSimParams = {
'simtime': self.tstop - self.tstart,
'dt': self.dt,
'spike_output_path': self.spike_output_path,
'label': 'population_spikes',
'ext': 'dat',
'GIDs': self.get_GIDs(),
'X': self.X,
'skiprows': 0,
}
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# SCALING (VOLUME not density) #
####################################
self.SCALING = 1.0
####################################
# MORPHOLOGIES #
####################################
# list of morphology files with default location, testing = True
# will point to simplified morphologies
testing = True
if testing:
self.PATH_m_y = os.path.join('morphologies', 'ballnsticks')
self.m_y = [Y + '_' + y + '.hoc' for Y, y in self.mapping_Yy]
else:
self.PATH_m_y = os.path.join('morphologies', 'stretched')
self.m_y = [
'L23E_oi24rpy1.hoc',
'L23I_oi38lbc1.hoc',
'L23I_oi38lbc1.hoc',
'L4E_53rpy1.hoc',
'L4E_j7_L4stellate.hoc',
'L4E_j7_L4stellate.hoc',
'L4I_oi26rbc1.hoc',
'L4I_oi26rbc1.hoc',
'L5E_oi15rpy4.hoc',
'L5E_j4a.hoc',
'L5I_oi15rbc1.hoc',
'L5I_oi15rbc1.hoc',
'L6E_51-2a.CNG.hoc',
'L6E_oi15rpy4.hoc',
'L6I_oi15rbc1.hoc',
'L6I_oi15rbc1.hoc',
]
####################################
# CONNECTION WEIGHTS #
####################################
# compute the synapse weight from fundamentals of exp synapse LIF
# neuron
self.J = self._compute_J()
# set up matrix containing the synapse weights between any population X
# and population Y, including exceptions for certain connections
J_YX = np.zeros(self.C_YX.shape)
J_YX += self.J
J_YX[:, 2::2] *= self.g
if hasattr(self, 'PSP_23e_4e'):
J_YX[0, 3] *= self.PSP_23e_4e / self.PSP_e
if hasattr(self, 'g_4e_4i'):
J_YX[2, 4] *= self.g_4e_4i / self.g
# extrapolate weights between populations X and
# cell type y in population Y
self.J_yX = {}
for Y, y in self.mapping_Yy:
[i] = np.where(np.array(self.Y) == Y)[0]
self.J_yX.update({y: J_YX[i, ]})
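# Note on indexing (hedged; follows the Potjans-Diesmann column convention
# assumed above): J_YX is indexed [target population Y, source population X].
# Column 0 corresponds to the external/thalamic input, odd columns to
# excitatory and even columns >= 2 to inhibitory presynaptic populations,
# which is why J_YX[:, 2::2] is scaled by the relative inhibitory gain g.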
####################################
# GEOMETRY OF CORTICAL COLUMN #
####################################
# set the boundaries of each layer, L1->L6,
# and mean depth of soma layers
self.layerBoundaries = np.array([[0.0, -81.6],
[-81.6, -587.1],
[-587.1, -922.2],
[-922.2, -1170.0],
[-1170.0, -1491.7]])
# assess the depth of each of the 16 subpopulations
self.depths = self._calcDepths()
# make a nice structure with data for each subpopulation
self.y_zip_list = list(zip(self.y, self.m_y,
self.depths, self.N_y))
##############################################################
# POPULATION PARAMS (cells, population, synapses, electrode) #
##############################################################
# Global LFPy.Cell-parameters, by default shared between populations
# Some passive parameters will not be fully consistent with LIF params
self.cellParams = {
'v_init': self.model_params['E_L'],
'cm': 1.0,
'Ra': 150,
'passive': True,
'passive_parameters': dict(g_pas=1. / (self.model_params['tau_m'] * 1E3), # assume cm=1
e_pas=self.model_params['E_L']),
'nsegs_method': 'lambda_f',
'lambda_f': 100,
'dt': self.dt,
'tstart': self.tstart,
'tstop': self.tstop,
'verbose': False,
}
# layer specific LFPy.Cell-parameters as nested dictionary
self.yCellParams = self._yCellParams()
# set the axes around which each cell type y is randomly rotated:
# SS types and INs are rotated around both the x- and z-axis
# in the population class, while P-types are
# only rotated around the z-axis
self.rand_rot_axis = {}
for y, _, _, _ in self.y_zip_list:
# identify pyramidal cell populations:
if y.rfind('p') >= 0:
self.rand_rot_axis.update({y: ['z']})
else:
self.rand_rot_axis.update({y: ['x', 'z']})
# additional simulation kwargs, see LFPy.Cell.simulate() docstring
self.simulationParams = {'rec_imem': True}
# a dict setting the number of cells N_y and geometry
# of cell type population y
self.populationParams = {}
for y, _, depth, N_y in self.y_zip_list:
self.populationParams.update({
y: {
'number': int(N_y * self.SCALING),
'radius': np.sqrt(1000**2 / np.pi),
'z_min': depth - 25,
'z_max': depth + 25,
'min_cell_interdist': 1.,
'min_r': [[-1E199, -1600, -1550, 1E99], [0, 0, 10, 10]]
}
})
# Set up cell type specific synapse parameters in terms of synapse model
# and synapse locations
self.synParams = {}
for y in self.y:
if y.rfind('p') >= 0:
# pyramidal types have apical dendrites
section = ['apic', 'dend']
else:
# other cell types do not
section = ['dend']
self.synParams.update({
y: {
'syntype': 'ExpSynI', # current based exponential synapse
'section': section,
# 'tau' : self.model_params["tau_syn_ex"],
},
})
# set up dictionary of synapse time constants specific to each
# postsynaptic cell type and presynaptic population
self.tau_yX = {}
for y in self.y:
self.tau_yX.update({
y: [self.model_params["tau_syn_in"] if 'I' in X else
self.model_params["tau_syn_ex"] for X in self.X]
})
# synaptic delay parameters; loc and scale are the mean and std for every
# network population, and negative draws will be removed
self.synDelayLoc, self.synDelayScale = self._synDelayParams()
# Define electrode geometry corresponding to a laminar electrode,
# where contact points have a radius r, surface normal vectors N,
# and LFP calculated as the average LFP in n random points on
# each contact. The recording electrode emulates a NeuroNexus array;
# contact 0 is the most superficial
self.electrodeParams = {
# contact locations:
'x': np.zeros(16),
'y': np.zeros(16),
'z': -np.mgrid[0:16] * 100,
# extracellular conductivity:
'sigma': 0.3,
# contact surface normals, radius, n-point averaging
'N': np.array([[1, 0, 0]] * 16),
'r': 7.5,
'n': 50,
'seedvalue': None,
# dendrite line sources, soma sphere source (Linden2014)
'method': 'root_as_point',
}
# parameters for LFPykit.LaminarCurrentSourceDensity
self.CSDParams = dict(
z=np.array([[-(i + 1) * 100, -i * 100] for i in range(16)]) + 50.,
r=np.ones(16) * np.sqrt(1000**2 / np.pi) # same as pop radius
)
# these cell attributes variables will be saved to file
self.savelist = []
#########################################
# MISC #
#########################################
# time resolution of downsampled data in ms
self.dt_output = 1.
# set fraction of neurons from population which LFP output is stored
self.recordSingleContribFrac = 0.
def get_GIDs(self):
GIDs = {}
ind = 1
for i, (X, N_X) in enumerate(zip(self.X, self.N_X)):
GIDs[X] = [ind, N_X]
ind += N_X
return GIDs
def _synDelayParams(self):
'''
set up the detailed synaptic delay parameters,
loc is mean delay,
scale is std with low bound cutoff,
assumes numpy.random.normal is used later
'''
delays = {}
# mean delays
loc = np.zeros((len(self.y), len(self.X)))
loc[:, 0] = self.delays[0]
loc[:, 1::2] = self.delays[0]
loc[:, 2::2] = self.delays[1]
# standard deviations
scale = loc * self.delay_rel_sd
# prepare output
delay_loc = {}
for i, y in enumerate(self.y):
delay_loc.update({y: loc[i]})
delay_scale = {}
for i, y in enumerate(self.y):
delay_scale.update({y: scale[i]})
return delay_loc, delay_scale
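# Illustrative use of the returned dicts (hedged; the actual call site lives
# in the population classes outside this file): per connection, a delay is
# drawn roughly as
#     d = np.random.normal(loc=synDelayLoc[y][i_X], scale=synDelayScale[y][i_X])
# with negative draws discarded/redrawn, matching the docstring above.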
def _calcDepths(self):
'''
return the cortical depth of each subpopulation
'''
depths = self.layerBoundaries.mean(axis=1)[1:]
depth_y = []
for y in self.y:
if y in ['p23', 'b23', 'nb23']:
depth_y = np.r_[depth_y, depths[0]]
elif y in ['p4', 'ss4(L23)', 'ss4(L4)', 'b4', 'nb4']:
depth_y = np.r_[depth_y, depths[1]]
elif y in ['p5(L23)', 'p5(L56)', 'b5', 'nb5']:
depth_y = np.r_[depth_y, depths[2]]
elif y in ['p6(L4)', 'p6(L56)', 'b6', 'nb6']:
depth_y = np.r_[depth_y, depths[3]]
else:
raise Exception('Error, revise parameters')
return depth_y
def _yCellParams(self):
'''
Return dict with parameters for each population.
The main operation is filling in cell type specific morphology
'''
# cell type specific parameters going into LFPy.Cell
yCellParams = {}
for layer, morpho, _, _ in self.y_zip_list:
yCellParams.update({layer: self.cellParams.copy()})
yCellParams[layer].update({
'morphology': os.path.join(self.PATH_m_y, morpho),
})
return yCellParams
if __name__ == '__main__':
params = multicompartment_params()
print(dir(params))
| gpl-3.0 | -5,546,768,391,567,412,000 | 35.082714 | 111 | 0.457051 | false |
MichSchli/QuestionAnsweringGCN | example_reader/graph_reader/graph_converter.py | 1 | 2831 | from example_reader.graph_reader.edge_type_utils import EdgeTypeUtils
from example_reader.graph_reader.graph import Graph
import numpy as np
class GraphConverter:
hypergraph_interface = None
edge_type_utils = None
def __init__(self, hypergraph_interface):
self.hypergraph_interface = hypergraph_interface
self.edge_type_utils = EdgeTypeUtils()
def get_neighborhood_graph(self, entities):
hypergraph = self.hypergraph_interface.get_neighborhood_graph(entities)
graph = Graph()
graph.centroid_indexes = hypergraph.centroid_indexes
graph.entity_centroid_paths = hypergraph.centroid_paths
graph.vertices = np.concatenate((hypergraph.entity_vertices, hypergraph.event_vertices))
graph.entity_vertex_indexes = np.arange(hypergraph.entity_vertices.shape[0], dtype=np.int32)
graph.update_general_vertex_to_entity_index_map()
#graph.nearby_centroid_map = []
#for vertex in hypergraph.entity_vertices:
# graph.nearby_centroid_map.append(hypergraph.get_nearby_centroids(vertex))
#for vertex in hypergraph.event_vertices:
# graph.nearby_centroid_map.append(hypergraph.get_nearby_centroids(vertex))
graph.edges = np.concatenate((hypergraph.entity_to_entity_edges,
hypergraph.event_to_entity_edges,
hypergraph.entity_to_event_edges))
graph.edge_types = [np.array([], dtype=np.int32) for _ in range(self.edge_type_utils.count_types())]
graph.edge_types[0] = np.arange(hypergraph.entity_to_entity_edges.shape[0], dtype=np.int32)
acc = hypergraph.entity_to_entity_edges.shape[0]
graph.edge_types[1] = np.arange(hypergraph.event_to_entity_edges.shape[0], dtype=np.int32) + acc
acc += hypergraph.event_to_entity_edges.shape[0]
graph.edge_types[2] = np.arange(hypergraph.entity_to_event_edges.shape[0], dtype=np.int32) + acc
vertex_name_map = {hypergraph.to_index(k):v for k,v in hypergraph.name_map.feature_map.items()}
graph.set_index_to_name_map(vertex_name_map)
entity_vertex_types = np.array([[1,0,0,0,0,0] for _ in range(hypergraph.entity_vertices.shape[0])], dtype=np.float32)
event_vertex_types = np.array([[0,1,0,0,0,0] for _ in range(hypergraph.event_vertices.shape[0])], dtype=np.float32)
if entity_vertex_types.shape[0] == 0:
entity_vertex_types = np.empty((0,6), dtype=np.float32)
if event_vertex_types.shape[0] == 0:
event_vertex_types = np.empty((0,6), dtype=np.float32)
graph.vertex_types = np.concatenate((entity_vertex_types, event_vertex_types))
graph.nearby_centroid_map = [hypergraph.nearby_centroid_map[entity] for entity in graph.vertices]
return graph | mit | 1,603,126,252,323,630,600 | 48.684211 | 125 | 0.671494 | false |
the-virtual-brain/tvb-hpc | tvb_hpc/rng.py | 1 | 3024 | # Copyright 2018 TVB-HPC contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SIMD friendly random number generation.
Currently uses only Philox4x32, whose 4x32-bit counter provides 128 bits per
call and also suits 32-bit GPU targets. See include/Random123/index.html for
details.
"""
import numpy as np
import ctypes
from .compiler import CppCompiler, Spec
from .utils import include_dir
rng_template = """
#include <Random123/philox.h>
#include <Random123/boxmuller.hpp>
extern "C" {
void tvb_rng(long long int seed, unsigned int nout,
float * __restrict out) {
// TODO other variants might vectorize better?
%(loop_pragma)s
for(unsigned int i=0; i<(nout/4); ++i) {
philox4x32_ctr_t ctr;
philox4x32_key_t key;
ctr.v[0] = seed + 4*i;
ctr.v[1] = seed + 4*i + 1;
ctr.v[2] = seed + 4*i + 2;
ctr.v[3] = seed + 4*i + 3;
philox4x32_ctr_t result = philox4x32(ctr, key);
r123::float2 normal = r123::boxmuller(result.v[0], result.v[1]);
out[i*4 + 0] = normal.x;
out[i*4 + 1] = normal.y;
r123::float2 normal2 = r123::boxmuller(result.v[2], result.v[3]);
out[i*4 + 2] = normal2.x;
out[i*4 + 3] = normal2.y;
}
}
}
"""
class RNG:
def __init__(self, comp: CppCompiler=None):
self.comp = comp or CppCompiler() # type: Compiler
# TODO consider loopy support for calling user functions / preamble
def generate_c(self, spec: Spec=None):
spec = spec or Spec()
self.comp.cflags += ['-I' + include_dir]
loop_pragma = ''
if spec.openmp:
loop_pragma = '#pragma omp parallel for'
decls = []
# decls += self.generate_alignments(['out'], spec)
return rng_template % {
'loop_pragma': loop_pragma,
'decls': '\n '.join(decls),
}
def build(self, spec):
self.dll = self.comp.build(self.generate_c(spec))
self.fn = self.dll.tvb_rng
self.fn.restype = None
self.fn.argtypes = [ctypes.c_longlong,
ctypes.c_uint,
ctypes.POINTER(ctypes.c_float)]
def fill(self, array, seed=42):
assert array.dtype == np.float32
self.fn(
self.fn.argtypes[0](seed),
self.fn.argtypes[1](array.size),
array.ctypes.data_as(self.fn.argtypes[2])
)
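# Usage sketch (illustrative only; assumes a default Spec and a working C++
# toolchain with the bundled Random123 headers):
#     spec = Spec()
#     rng = RNG()
#     rng.build(spec)
#     out = np.zeros(1024, dtype=np.float32)  # size should be a multiple of 4
#     rng.fill(out, seed=42)                  # out now holds N(0, 1) variates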
| apache-2.0 | -6,177,618,747,205,311,000 | 30.5 | 78 | 0.581019 | false |
google-research/google-research | t5_closed_book_qa/t5_cbqa/metrics_test.py | 1 | 2129 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""""Tests for T5 CBQA metrics."""
from absl.testing import absltest
from t5.evaluation import test_utils
from t5_closed_book_qa.t5_cbqa import metrics
class MetricsTest(test_utils.BaseMetricsTest):
def test_natural_questions_metrics(self):
targets = [
[('yes',), ('no',), ('yes',), ('maybe',)],
[('Ashley', 'Mary-Kate'), ('Ashley and Mary-Kate',)],
[('Colin', 'Adam')],
[('yes',), ('yes',), ('yes',)],
[('no', 'not really'), ('no',), ()],
[('no', 'not really'), ('no',), ()],
]
predictions = [
[('yes',)], # correct
[('Mary-Kate', 'Ashley')], # correct
[('Colin', 'Adam')], # correct, but not golden
[('no',)], # incorrect
[('no', 'Not really',)], # needs normalization
[()], # incorrect
]
self.assertDictClose(
metrics.natural_questions(targets, predictions),
{
'recall': 3/5*100,
'golden_answers': 5,
})
self.assertDictClose(
metrics.natural_questions(
targets, predictions,
non_null_threshold=1),
{
'recall': 4/6*100,
'golden_answers': 6,
})
self.assertDictClose(
metrics.natural_questions(
targets, predictions,
non_null_threshold=1,
normalize_answers=False),
{
'recall': 3/6*100,
'golden_answers': 6,
})
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 3,396,258,173,902,193,700 | 27.77027 | 74 | 0.570221 | false |
asoc/snakewatch | snakewatch/action/Write.py | 1 | 2980 | """
This file is part of snakewatch.
snakewatch is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
snakewatch is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with snakewatch. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function, absolute_import, unicode_literals, division
import os
from ._ConfirmAction import ConfirmAction
from ..util import AbortError, ConfigError, ui_print
class WriteAction(ConfirmAction):
"""An Action that returns the line with possible colouring"""
instances = dict()
def __init__(self, cfg, ui_confirm):
self.mode = 'w' if cfg.get('truncate', False) else 'a'
if 'filename' in cfg:
filename = cfg['filename']
if filename.startswith('~'):
filename = os.path.expanduser(filename)
self.filename = os.path.abspath(filename)
super(WriteAction, self).__init__(cfg, ui_confirm, ['filename'])
WriteAction.open_file_instance(self)
@classmethod
def open_file_instance(cls, inst):
try:
file_instances = cls.instances[inst.filename]
except KeyError:
file_instances = list()
cls.instances[inst.filename] = file_instances
if file_instances:
inst.fp = file_instances[0]
if inst.fp.mode != inst.mode:
raise ConfigError('File {} is opened in conflicting modes.'.format(inst.filename))
else:
try:
inst.fp = open(inst.filename, inst.mode)
except (OSError, IOError) as err:
ui_print().error(
'Cannot open {} for writing.'.format(inst.filename),
str(err), sep='\n'
)
raise AbortError()
file_instances.append(inst)
@classmethod
def close_file_instance(cls, inst):
try:
file_instances = cls.instances[inst.filename]
except KeyError:
return
try:
file_instances.remove(inst)
except ValueError:
pass
if not file_instances:
inst.fp.close()
def run_on(self, line):
self.fp.write(line)
self.fp.flush()
os.fsync(self.fp.fileno())
return None
def release_resources(self):
WriteAction.close_file_instance(self)
def confirm_message(self):
return 'The file {} will be {}.'.format(
self.filename,
'overwritten' if self.mode == 'w' else 'written to',
)
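# Illustrative action config (hedged; the parent ConfirmAction may require
# further keys, e.g. a match pattern, which it validates itself):
#     cfg = {'filename': '~/watched.log', 'truncate': False}
#     action = WriteAction(cfg, ui_confirm)  # ui_confirm supplied by the UI layer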
| bsd-3-clause | 2,404,914,814,539,267,600 | 29.721649 | 98 | 0.614094 | false |
eddieantonio/statically-typed-python | my_hip_site/my_hip_site.py | 1 | 1932 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
from flask import Flask, request
from css import css as _
app = Flask(__name__)
@app.route('/')
def home():
return _('''
<form action="/" method="POST">
<label> <input name=a> </label><br />
<label> ÷ <input name=b> </label><br />
<button type=submit> Divide! </button>
</form>
''')
@app.route('/', methods=['POST'])
def divide_numbers():
a = request.form['a']
b = request.form['b']
answer = a / b
return _('''
<main>{a} ÷ {b} = {answer:.5f}</main>
'''.format(a=a, b=b, answer=answer))
if __name__ == '__main__':
app.run()
| unlicense | -7,519,536,901,940,282,000 | 31.711864 | 73 | 0.67513 | false |
makefu/bepasty-server | bepasty/views/display.py | 1 | 6026 | import errno
import time
from flask import current_app, render_template, Markup, url_for
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from pygments import highlight
from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound as NoPygmentsLexer
from ..utils.permissions import *
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.formatters import CustomHtmlFormatter
from ..utils._compat import iteritems
from . import blueprint
from .filelist import file_infos
def rendering_allowed(item_type, item_size, use_pygments, complete):
"""
check if rendering is allowed, checks for:
* whether the item is completely uploaded
* whether the size is within the configured limits for the content-type
"""
if not complete:
return False
if use_pygments:
# if we use pygments, special restrictions apply
item_type = 'HIGHLIGHT_TYPES'
# create a tuple list [(content_type_prefix, max_size), ...] with long prefixes first
ct_size = sorted(iteritems(current_app.config['MAX_RENDER_SIZE']), key=lambda e: len(e[0]), reverse=True)
for ct, size in ct_size:
if item_type.startswith(ct):
return item_size <= size
# there should be one entry with ct == '', so we should never get here:
return False
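# Illustration (hedged; the config keys/values below are examples only): with
#     MAX_RENDER_SIZE = {'': 0, 'image/': 10 * 2 ** 20, 'HIGHLIGHT_TYPES': 2 ** 20}
# a 3 MiB image/png item is allowed (longest matching prefix 'image/'), any
# item that pygments would highlight is checked against the 'HIGHLIGHT_TYPES'
# entry (1 MiB here), and everything else falls through to the catch-all ''.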
class DisplayView(MethodView):
def get(self, name):
if not may(READ):
raise Forbidden()
try:
item = current_app.storage.openwrite(name)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
with item as item:
complete = item.meta['complete']
if not complete and not may(ADMIN):
error = 'Upload incomplete. Try again later.'
return render_template('error.html', heading=item.meta['filename'], body=error), 409
if item.meta['locked'] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
raise NotFound()
def read_data(item):
# reading the item for rendering is registered like a download
data = item.data.read(item.data.size, 0)
item.meta['timestamp-download'] = int(time.time())
return data
size = item.meta['size']
ct = item.meta['type']
try:
get_lexer_for_mimetype(ct)
use_pygments = True
ct_pygments = ct
except NoPygmentsLexer:
if ct.startswith('text/'):
# seems like we found a text type not supported by pygments
# use text/plain so we get a display with line numbers
use_pygments = True
ct_pygments = 'text/plain'
else:
use_pygments = False
if rendering_allowed(ct, size, use_pygments, complete):
if ct.startswith('text/x-bepasty-'):
# special bepasty items - must be first, don't feed to pygments
if ct == 'text/x-bepasty-list':
names = read_data(item).decode('utf-8').splitlines()
files = sorted(file_infos(names), key=lambda f: f['filename'])
rendered_content = Markup(render_template('filelist_tableonly.html', files=files))
else:
rendered_content = u"Can't render this content type."
elif ct.startswith('image/'):
src = url_for('bepasty.download', name=name)
rendered_content = Markup(u'<img src="%s" alt="the image" width="800">' % src)
elif ct.startswith('audio/'):
src = url_for('bepasty.download', name=name)
alt_msg = u'html5 audio element not supported by your browser.'
rendered_content = Markup(u'<audio controls src="%s">%s</audio>' % (src, alt_msg))
elif ct.startswith('video/'):
src = url_for('bepasty.download', name=name)
alt_msg = u'html5 video element not supported by your browser.'
rendered_content = Markup(u'<video controls src="%s">%s</video>' % (src, alt_msg))
elif ct in ['application/pdf', 'application/x-pdf', ]:
src = url_for('bepasty.inline', name=name)
link_txt = u'Click to see PDF'
rendered_content = Markup(u'<a href="%s">%s</a>' % (src, link_txt))
elif use_pygments:
text = read_data(item)
# TODO we don't have the coding in metadata
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
# well, it is not utf-8 or ascii, so we can only guess...
text = text.decode('iso-8859-1')
lexer = get_lexer_for_mimetype(ct_pygments)
formatter = CustomHtmlFormatter(linenos='table', lineanchors="L",
lineparagraphs="L", anchorlinenos=True)
rendered_content = Markup(highlight(text, lexer, formatter))
else:
rendered_content = u"Can't render this content type."
else:
if not complete:
rendered_content = u"Rendering not allowed (not complete). Is it still being uploaded?"
else:
rendered_content = u"Rendering not allowed (too big?). Try download"
return render_template('display.html', name=name, item=item,
rendered_content=rendered_content)
blueprint.add_url_rule('/<itemname:name>', view_func=DisplayView.as_view('display'))
| bsd-2-clause | -7,702,975,127,156,539,000 | 44.651515 | 109 | 0.554597 | false |
dimtion/jml | inputFiles/opponents/team-roquette/greedy.py | 1 | 3203 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import lib.PyratApi as api
import lib.travelHeuristics as th
import lib.utils as ut
import time
import operator
BOT_NAME = "greedy"
PATH = []
METAGRAPH = {}
BESTPATH = {}
MOVING = False
EATENCOINS = []
NB_COINS_TO_COMPUTE = 5
CURRENTCOIN = []
# This function should not return anything, but should be used for a short preprocessing
def initializationCode (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
global METAGRAPH
global BESTPATHS
iniTime = time.time()
METAGRAPH, BESTPATHS = th.generateMetaGraph(mazeMap, playerLocation, coins)
api.debug(time.time() - iniTime)
return "Everything seems fine, let's start !"
def updateCoins (metaGraph, eatenCoins, elLocation):
if elLocation in metaGraph:
eatenCoins.append(elLocation)
return eatenCoins
def orderNodes(metaGraph, currentNode, eatenCoins):
temp = metaGraph[currentNode]
nodesList = [x for x in list(temp.items()) if x[0] not in eatenCoins]
nodesList.sort(key = operator.itemgetter(1))
return nodesList
def chooseCoin (metaGraph, playerLocation, eatenCoins):
# Determine the vertices to evaluate with the naive algorithm
nodesToCompute = orderNodes(metaGraph, playerLocation, eatenCoins)
# Build the path using the naive algorithm
bestDis, bestPaths = th.travellingSalesman(playerLocation, nodesToCompute[:NB_COINS_TO_COMPUTE - 1], 0, [])
return bestPaths[0]
# This is where you should write your code to determine the next direction
def determineNextMove (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
global MOVING
global METAGRAPH
global BESTPATHS
global EATENCOINS
global PATH
global CURRENTCOIN
EATENCOINS = updateCoins(METAGRAPH, EATENCOINS, opponentLocation)
EATENCOINS = updateCoins(METAGRAPH, EATENCOINS, playerLocation)
if MOVING :
if not PATH :
MOVING = False
if opponentLocation == CURRENTCOIN and playerLocation != CURRENTCOIN:
PATH = []
PATH = th.findNearestCoin(mazeMap, playerLocation, coins)
if not MOVING :
CURRENTCOIN = chooseCoin(METAGRAPH, playerLocation, EATENCOINS)
PATH = BESTPATHS[playerLocation][CURRENTCOIN]
PATH.pop()
MOVING = True
nextPos = PATH.pop()
return ut.convertPosesToDir(nextPos, playerLocation, mazeMap)
####
if __name__ == "__main__" :
# We let technical stuff happen
(mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = api.initGame(BOT_NAME)
initializationCode(mazeWidth, mazeHeight, mazeMap, preparationTime, playerLocation, opponentLocation, coins)
# We decide how to move and wait for the next step
while not gameIsOver :
(playerLocation, opponentLocation, coins, gameIsOver) = api.processNextInformation()
if gameIsOver :
break
nextMove = determineNextMove(mazeWidth, mazeHeight, mazeMap, turnTime, playerLocation, opponentLocation, coins)
api.writeToPipe(nextMove)
| mit | 1,538,619,702,752,280,000 | 26.358974 | 141 | 0.702281 | false |
vialectrum/vialectrum | electrum_ltc/gui/qt/util.py | 1 | 34021 | import asyncio
import os.path
import time
import sys
import platform
import queue
import traceback
import os
import webbrowser
from functools import partial, lru_cache
from typing import NamedTuple, Callable, Optional, TYPE_CHECKING, Union, List, Dict, Any
from PyQt5.QtGui import (QFont, QColor, QCursor, QPixmap, QStandardItem,
QPalette, QIcon, QFontMetrics, QShowEvent)
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, pyqtSignal,
QCoreApplication, QItemSelectionModel, QThread,
QSortFilterProxyModel, QSize, QLocale)
from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
QAbstractItemView, QVBoxLayout, QLineEdit,
QStyle, QDialog, QGroupBox, QButtonGroup, QRadioButton,
QFileDialog, QWidget, QToolButton, QTreeView, QPlainTextEdit,
QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate,
QMenu)
from electrum_ltc.i18n import _, languages
from electrum_ltc.util import FileImportFailed, FileExportFailed, make_aiohttp_session, resource_path
from electrum_ltc.util import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_INFLIGHT, PR_UNKNOWN, PR_FAILED, PR_ROUTING
if TYPE_CHECKING:
from .main_window import ElectrumWindow
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
dialogs = []
pr_icons = {
PR_UNKNOWN:"warning.png",
PR_UNPAID:"unpaid.png",
PR_PAID:"confirmed.png",
PR_EXPIRED:"expired.png",
PR_INFLIGHT:"unconfirmed.png",
PR_FAILED:"warning.png",
PR_ROUTING:"unconfirmed.png",
}
# filter tx files in QFileDialog:
TRANSACTION_FILE_EXTENSION_FILTER_ANY = "Transaction (*.txn *.psbt);;All files (*)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX = "Partial Transaction (*.psbt)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX = "Complete Transaction (*.txn)"
TRANSACTION_FILE_EXTENSION_FILTER_SEPARATE = (f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX};;"
f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX};;"
f"All files (*)")
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ThreadedButton(QPushButton):
def __init__(self, text, task, on_success=None, on_error=None):
QPushButton.__init__(self, text)
self.task = task
self.on_success = on_success
self.on_error = on_error
self.clicked.connect(self.run_task)
def run_task(self):
self.setEnabled(False)
self.thread = TaskThread(self)
self.thread.add(self.task, self.on_success, self.done, self.on_error)
def done(self):
self.setEnabled(True)
self.thread.stop()
class WWLabel(QLabel):
def __init__ (self, text="", parent=None):
QLabel.__init__(self, text, parent)
self.setWordWrap(True)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
class HelpLabel(QLabel):
def __init__(self, text, help_text):
QLabel.__init__(self, text)
self.help_text = help_text
self.app = QCoreApplication.instance()
self.font = QFont()
def mouseReleaseEvent(self, x):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text)
def enterEvent(self, event):
self.font.setUnderline(True)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
return QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.font.setUnderline(False)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
return QLabel.leaveEvent(self, event)
class HelpButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, '?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text,
rich_text=True)
class InfoButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, 'Info')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(6 * char_width_in_lineedit())
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Info'),
text=self.help_text,
rich_text=True)
class Buttons(QHBoxLayout):
def __init__(self, *buttons):
QHBoxLayout.__init__(self)
self.addStretch(1)
for b in buttons:
if b is None:
continue
self.addWidget(b)
class CloseButton(QPushButton):
def __init__(self, dialog):
QPushButton.__init__(self, _("Close"))
self.clicked.connect(dialog.close)
self.setDefault(True)
class CopyButton(QPushButton):
def __init__(self, text_getter, app):
QPushButton.__init__(self, _("Copy"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
class CopyCloseButton(QPushButton):
def __init__(self, text_getter, app, dialog):
QPushButton.__init__(self, _("Copy and Close"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
self.clicked.connect(dialog.close)
self.setDefault(True)
class OkButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("OK"))
self.clicked.connect(dialog.accept)
self.setDefault(True)
class CancelButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("Cancel"))
self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
def top_level_window_recurse(self, window=None, test_func=None):
window = window or self
classes = (WindowModalDialog, QMessageBox)
if test_func is None:
test_func = lambda x: True
for n, child in enumerate(window.children()):
# Test for visibility as old closed dialogs may not be GC-ed.
# Only accept children that confirm to test_func.
if isinstance(child, classes) and child.isVisible() \
and test_func(child):
return self.top_level_window_recurse(child, test_func=test_func)
return window
def top_level_window(self, test_func=None):
return self.top_level_window_recurse(test_func)
def question(self, msg, parent=None, title=None, icon=None, **kwargs) -> bool:
Yes, No = QMessageBox.Yes, QMessageBox.No
return Yes == self.msg_box(icon=icon or QMessageBox.Question,
parent=parent,
title=title or '',
text=msg,
buttons=Yes|No,
defaultButton=No,
**kwargs)
def show_warning(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
title or _('Warning'), msg, **kwargs)
def show_error(self, msg, parent=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
_('Error'), msg, **kwargs)
def show_critical(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Critical, parent,
title or _('Critical Error'), msg, **kwargs)
def show_message(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Information, parent,
title or _('Information'), msg, **kwargs)
def msg_box(self, icon, parent, title, text, *, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
parent = parent or self.top_level_window()
return custom_message_box(icon=icon,
parent=parent,
title=title,
text=text,
buttons=buttons,
defaultButton=defaultButton,
rich_text=rich_text,
checkbox=checkbox)
def custom_message_box(*, icon, parent, title, text, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
if type(icon) is QPixmap:
d = QMessageBox(QMessageBox.Information, title, str(text), buttons, parent)
d.setIconPixmap(icon)
else:
d = QMessageBox(icon, title, str(text), buttons, parent)
d.setWindowModality(Qt.WindowModal)
d.setDefaultButton(defaultButton)
if rich_text:
d.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
# set AutoText instead of RichText
# AutoText lets Qt figure out whether to render as rich text.
# e.g. if text is actually plain text and uses "\n" newlines;
# and we set RichText here, newlines would be swallowed
d.setTextFormat(Qt.AutoText)
else:
d.setTextInteractionFlags(Qt.TextSelectableByMouse)
d.setTextFormat(Qt.PlainText)
if checkbox is not None:
d.setCheckBox(checkbox)
return d.exec_()
class WindowModalDialog(QDialog, MessageBoxMixin):
'''Handy wrapper; window modal dialogs are better for our multi-window
daemon model as other wallet windows can still be accessed.'''
def __init__(self, parent, title=None):
QDialog.__init__(self, parent)
self.setWindowModality(Qt.WindowModal)
if title:
self.setWindowTitle(title)
class WaitingDialog(WindowModalDialog):
'''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, parent: QWidget, message: str, task, on_success=None, on_error=None):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.accepted.connect(self.on_accepted)
self.show()
self.thread = TaskThread(self)
self.thread.finished.connect(self.deleteLater) # see #3956
self.thread.add(task, on_success, self.accept, on_error)
def wait(self):
self.thread.wait()
def on_accepted(self):
self.thread.stop()
def update(self, msg):
print(msg)
self.message_label.setText(msg)
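# Usage sketch (illustrative; task/on_success/on_error and the names below are
# caller-supplied, not part of this module):
#     WaitingDialog(parent_window, _("Please wait..."),
#                   task=lambda: do_slow_work(),
#                   on_success=lambda result: show_result(result),
#                   on_error=parent_window.on_error)
# The dialog shows itself immediately and stops its worker thread on accept.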
class BlockingWaitingDialog(WindowModalDialog):
"""Shows a waiting dialog whilst running a task.
Should be called from the GUI thread. The GUI thread will be blocked while
the task is running; the point of the dialog is to provide feedback
to the user regarding what is going on.
"""
def __init__(self, parent: QWidget, message: str, task: Callable[[], Any]):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.show()
QCoreApplication.processEvents()
task()
self.accept()
def line_dialog(parent, title, label, ok_label, default=None):
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.text()
def text_dialog(parent, title, header_layout, ok_label, default=None, allow_multi=False):
from .qrtextedit import ScanQRTextEdit
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(600)
l = QVBoxLayout()
dialog.setLayout(l)
if isinstance(header_layout, str):
l.addWidget(QLabel(header_layout))
else:
l.addLayout(header_layout)
txt = ScanQRTextEdit(allow_multi=allow_multi)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.toPlainText()
class ChoicesLayout(object):
def __init__(self, msg, choices, on_clicked=None, checked_index=0):
vbox = QVBoxLayout()
if len(msg) > 50:
vbox.addWidget(WWLabel(msg))
msg = ""
gb2 = QGroupBox(msg)
vbox.addWidget(gb2)
vbox2 = QVBoxLayout()
gb2.setLayout(vbox2)
self.group = group = QButtonGroup()
for i,c in enumerate(choices):
button = QRadioButton(gb2)
button.setText(c)
vbox2.addWidget(button)
group.addButton(button)
group.setId(button, i)
if i==checked_index:
button.setChecked(True)
if on_clicked:
group.buttonClicked.connect(partial(on_clicked, self))
self.vbox = vbox
def layout(self):
return self.vbox
def selected_index(self):
return self.group.checkedId()
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
except ValueError:
# the user might have changed address_e to an
# address not in the wallet (or to something that isn't an address)
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, defaultname )
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = filename_e.text()
_filter = "*.csv" if text.endswith(".csv") else "*.json" if text.endswith(".json") else None
p, __ = QFileDialog.getSaveFileName(None, select_msg, text, _filter)
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = filename_e.text()
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
def __init__(self, tv: 'MyTreeView'):
super().__init__(tv)
self.tv = tv
self.opened = None
def on_closeEditor(editor: QLineEdit, hint):
self.opened = None
def on_commitData(editor: QLineEdit):
new_text = editor.text()
idx = QModelIndex(self.opened)
row, col = idx.row(), idx.column()
_prior_text, user_role = self.tv.text_txid_from_coordinate(row, col)
# check that we didn't forget to set UserRole on an editable field
assert user_role is not None, (row, col)
self.tv.on_edited(idx, user_role, new_text)
self.closeEditor.connect(on_closeEditor)
self.commitData.connect(on_commitData)
def createEditor(self, parent, option, idx):
self.opened = QPersistentModelIndex(idx)
return super().createEditor(parent, option, idx)
class MyTreeView(QTreeView):
ROLE_CLIPBOARD_DATA = Qt.UserRole + 100
def __init__(self, parent: 'ElectrumWindow', create_menu, *,
stretch_column=None, editable_columns=None):
super().__init__(parent)
self.parent = parent
self.config = self.parent.config
self.stretch_column = stretch_column
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# Control which columns are editable
if editable_columns is not None:
editable_columns = set(editable_columns)
elif stretch_column is not None:
editable_columns = {stretch_column}
else:
editable_columns = {}
self.editable_columns = editable_columns
self.setItemDelegate(ElectrumItemDelegate(self))
self.current_filter = ""
self.setRootIsDecorated(False) # remove left margin
self.toolbar_shown = False
# When figuring out the size of columns, Qt by default looks at
# the first 1000 rows (at least if resize mode is QHeaderView.ResizeToContents).
# This would be REALLY SLOW, and it's not perfect anyway.
# So to speed the UI up considerably, set it to
# only look at as many rows as currently visible.
self.header().setResizeContentsPrecision(0)
self._pending_update = False
self._forced_update = False
def set_editability(self, items):
for idx, i in enumerate(items):
i.setEditable(idx in self.editable_columns)
def selected_in_column(self, column: int):
items = self.selectionModel().selectedIndexes()
return list(x for x in items if x.column() == column)
def current_item_user_role(self, col) -> Any:
idx = self.selectionModel().currentIndex()
idx = idx.sibling(idx.row(), col)
item = self.model().itemFromIndex(idx)
if item:
return item.data(Qt.UserRole)
def set_current_idx(self, set_current: QPersistentModelIndex):
if set_current:
assert isinstance(set_current, QPersistentModelIndex)
assert set_current.isValid()
self.selectionModel().select(QModelIndex(set_current), QItemSelectionModel.SelectCurrent)
def update_headers(self, headers: Union[List[str], Dict[int, str]]):
# headers is either a list of column names, or a dict: (col_idx->col_name)
if not isinstance(headers, dict): # convert to dict
headers = dict(enumerate(headers))
col_names = [headers[col_idx] for col_idx in sorted(headers.keys())]
model = self.model()
model.setHorizontalHeaderLabels(col_names)
self.header().setStretchLastSection(False)
for col_idx in headers:
sm = QHeaderView.Stretch if col_idx == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col_idx, sm)
def keyPressEvent(self, event):
if self.itemDelegate().opened:
return
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.selectionModel().currentIndex())
return
super().keyPressEvent(event)
def on_activated(self, idx):
# on 'enter' we show the menu
pt = self.visualRect(idx).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def edit(self, idx, trigger=QAbstractItemView.AllEditTriggers, event=None):
"""
this is to prevent:
edit: editing failed
from inside qt
"""
return super().edit(idx, trigger, event)
def on_edited(self, idx: QModelIndex, user_role, text):
self.parent.wallet.set_label(user_role, text)
self.parent.history_model.refresh('on_edited in MyTreeView')
self.parent.update_completions()
def should_hide(self, row):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
return False
def text_txid_from_coordinate(self, row_num, column):
assert not isinstance(self.model(), QSortFilterProxyModel)
idx = self.model().index(row_num, column)
item = self.model().itemFromIndex(idx)
user_role = item.data(Qt.UserRole)
return item.text(), user_role
def hide_row(self, row_num):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
should_hide = self.should_hide(row_num)
if not self.current_filter and should_hide is None:
# no filters at all, neither date nor search
self.setRowHidden(row_num, QModelIndex(), False)
return
for column in self.filter_columns:
txt, _ = self.text_txid_from_coordinate(row_num, column)
txt = txt.lower()
if self.current_filter in txt:
# the filter matched, but the date filter might apply
self.setRowHidden(row_num, QModelIndex(), bool(should_hide))
break
else:
# we did not find the filter in any columns, hide the item
self.setRowHidden(row_num, QModelIndex(), True)
def filter(self, p=None):
if p is not None:
p = p.lower()
self.current_filter = p
self.hide_rows()
def hide_rows(self):
for row in range(self.model().rowCount()):
self.hide_row(row)
def create_toolbar(self, config=None):
hbox = QHBoxLayout()
buttons = self.get_toolbar_buttons()
for b in buttons:
b.setVisible(False)
hbox.addWidget(b)
hide_button = QPushButton('x')
hide_button.setVisible(False)
hide_button.pressed.connect(lambda: self.show_toolbar(False, config))
self.toolbar_buttons = buttons + (hide_button,)
hbox.addStretch()
hbox.addWidget(hide_button)
return hbox
def save_toolbar_state(self, state, config):
pass # implemented in subclasses
def show_toolbar(self, state, config=None):
if state == self.toolbar_shown:
return
self.toolbar_shown = state
if config:
self.save_toolbar_state(state, config)
for b in self.toolbar_buttons:
b.setVisible(state)
if not state:
self.on_hide_toolbar()
def toggle_toolbar(self, config=None):
self.show_toolbar(not self.toolbar_shown, config)
def add_copy_menu(self, menu: QMenu, idx) -> QMenu:
cc = menu.addMenu(_("Copy"))
for column in self.Columns:
column_title = self.model().horizontalHeaderItem(column).text()
item_col = self.model().itemFromIndex(idx.sibling(idx.row(), column))
clipboard_data = item_col.data(self.ROLE_CLIPBOARD_DATA)
if clipboard_data is None:
clipboard_data = item_col.text().strip()
cc.addAction(column_title,
lambda text=clipboard_data, title=column_title:
self.place_text_on_clipboard(text, title=title))
return cc
def place_text_on_clipboard(self, text: str, *, title: str = None) -> None:
self.parent.do_copy(text, title=title)
def showEvent(self, e: 'QShowEvent'):
super().showEvent(e)
if e.isAccepted() and self._pending_update:
self._forced_update = True
self.update()
self._forced_update = False
def maybe_defer_update(self) -> bool:
"""Returns whether we should defer an update/refresh."""
defer = not self.isVisible() and not self._forced_update
# side-effect: if we decide to defer update, the state will become stale:
self._pending_update = defer
return defer
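# Typical call pattern in subclasses (illustrative; the concrete list views
# live in sibling modules): an update()/refresh() implementation starts with
#     if self.maybe_defer_update():
#         return
# and relies on showEvent() above to force the deferred refresh once the view
# becomes visible again.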
class ButtonsWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.buttons = []
def resizeButtons(self):
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
x = self.rect().right() - frameWidth - 10
y = self.rect().bottom() - frameWidth
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name, on_click, tooltip):
button = QToolButton(self)
button.setIcon(read_QIcon(icon_name))
button.setIconSize(QSize(25,25))
button.setCursor(QCursor(Qt.PointingHandCursor))
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.clicked.connect(on_click)
self.buttons.append(button)
return button
def addCopyButton(self, app):
self.app = app
self.addButton("copy.png", self.on_copy, _("Copy to clipboard"))
def on_copy(self):
self.app.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
def __init__(self, text=None):
QLineEdit.__init__(self, text)
self.buttons = []
def resizeEvent(self, e):
o = QLineEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons = []
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class PasswordLineEdit(QLineEdit):
def __init__(self, *args, **kwargs):
QLineEdit.__init__(self, *args, **kwargs)
self.setEchoMode(QLineEdit.Password)
def clear(self):
# Try to actually overwrite the memory.
# This is really just a best-effort thing...
self.setText(len(self.text()) * " ")
super().clear()
class TaskThread(QThread):
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the context of its parent.'''
class Task(NamedTuple):
task: Callable
cb_success: Optional[Callable]
cb_done: Optional[Callable]
cb_error: Optional[Callable]
doneSig = pyqtSignal(object, object, object)
def __init__(self, parent, on_error=None):
super(TaskThread, self).__init__(parent)
self.on_error = on_error
self.tasks = queue.Queue()
self.doneSig.connect(self.on_done)
self.start()
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get() # type: TaskThread.Task
if not task:
break
try:
result = task.task()
self.doneSig.emit(result, task.cb_done, task.cb_success)
except BaseException:
self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
def on_done(self, result, cb_done, cb_result):
# This runs in the parent's thread.
if cb_done:
cb_done()
if cb_result:
cb_result(result)
def stop(self):
self.tasks.put(None)
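# Usage sketch (illustrative; slow_call/label/window are caller-side names):
# callbacks are delivered through doneSig, i.e. in the parent's (GUI) thread,
# so widgets may be touched from them:
#     thread = TaskThread(window)
#     thread.add(lambda: slow_call(),
#                on_success=lambda result: label.setText(str(result)),
#                on_error=window.on_error)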
class ColorSchemeItem:
def __init__(self, fg_color, bg_color):
self.colors = (fg_color, bg_color)
def _get_color(self, background):
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background=False):
css_prefix = "background-" if background else ""
color = self._get_color(background)
return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
def as_color(self, background=False):
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
GREEN = ColorSchemeItem("#117c11", "#8af296")
YELLOW = ColorSchemeItem("#897b2a", "#ffff00")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
DEFAULT = ColorSchemeItem("black", "white")
@staticmethod
def has_dark_background(widget):
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget, force_dark=False):
if force_dark or ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
class AcceptFileDragDrop:
def __init__(self, file_type=""):
assert isinstance(self, QWidget)
self.setAcceptDrops(True)
self.file_type = file_type
def validateEvent(self, event):
if not event.mimeData().hasUrls():
event.ignore()
return False
for url in event.mimeData().urls():
if not url.toLocalFile().endswith(self.file_type):
event.ignore()
return False
event.accept()
return True
def dragEnterEvent(self, event):
self.validateEvent(event)
def dragMoveEvent(self, event):
if self.validateEvent(event):
event.setDropAction(Qt.CopyAction)
def dropEvent(self, event):
if self.validateEvent(event):
for url in event.mimeData().urls():
self.onFileAdded(url.toLocalFile())
def onFileAdded(self, fn):
raise NotImplementedError()
def import_meta_gui(electrum_window, title, importer, on_success):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getOpenFileName(_("Open {} file").format(title), filter_)
if not filename:
return
try:
importer(filename)
except FileImportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {} were successfully imported").format(title))
on_success()
def export_meta_gui(electrum_window, title, exporter):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getSaveFileName(_("Select file to save your {}").format(title),
'electrum-ltc_{}.json'.format(title), filter_)
if not filename:
return
try:
exporter(filename)
except FileExportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {0} were exported to '{1}'")
.format(title, str(filename)))
def get_parent_main_window(widget):
"""Returns a reference to the ElectrumWindow this widget belongs to."""
from .main_window import ElectrumWindow
from .transaction_dialog import TxDialog
for _ in range(100):
if widget is None:
return None
if isinstance(widget, ElectrumWindow):
return widget
elif isinstance(widget, TxDialog):
return widget.main_window
else:
widget = widget.parentWidget()
return None
def icon_path(icon_basename):
return resource_path('gui', 'icons', icon_basename)
@lru_cache(maxsize=1000)
def read_QIcon(icon_basename):
return QIcon(icon_path(icon_basename))
def get_default_language():
name = QLocale.system().name()
return name if name in languages else 'en_UK'
def char_width_in_lineedit() -> int:
char_width = QFontMetrics(QLineEdit().font()).averageCharWidth()
# 'averageCharWidth' seems to underestimate on Windows, hence 'max()'
return max(9, char_width)
def webopen(url: str):
if sys.platform == 'linux' and os.environ.get('APPIMAGE'):
# When on Linux webbrowser.open can fail in AppImage because it can't find the correct libdbus.
# We just fork the process and unset LD_LIBRARY_PATH before opening the URL.
# See #5425
if os.fork() == 0:
del os.environ['LD_LIBRARY_PATH']
webbrowser.open(url)
sys.exit(0)
else:
webbrowser.open(url)
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
t.start()
app.exec_()
| mit | -5,169,814,777,630,352,000 | 34.364865 | 124 | 0.608271 | false |
GearsAD/semisorted_arnerve | arnerve/core/LCMManager.py | 1 | 3344 | '''
Created on Sep 7, 2014
@author: gearsad
'''
import lcm
#Import the user types
from user_update_t import user_update_t
#Import the bot types
from bot_update_t import bot_update_t
from bot_control_command_t import bot_control_command_t
#Import the role types
from role_response_t import role_response_t
class LCMManager():
def __init__(self):
#Broadcast across the wire
self.lc = lcm.LCM("udpm://239.255.76.67:7667?ttl=1")
self.__subscriptions = []
self.__subscriptions.append(self.lc.subscribe("ARNerve_UserUpdates", self.UpdateUsersHandler))
self.__subscriptions.append(self.lc.subscribe("ARNerve_UserHerder_RoleResponses", self.UpdateFromRoleResponse))
# Add all the bot channels.
self.__subscriptions.append(self.lc.subscribe("ARNerve_Bot_Update_GIRR", self.UpdateBot))
def Attach(self, userManager, roleManager, botManager):
'''
Attach relevant objects to the RoleManager
'''
self.__userManager = userManager
self.__roleManager = roleManager
self.__botManager = botManager
def UpdateUsersHandler(self, channel, data):
'''
Get the updated user and add it to the user manager
'''
msg = user_update_t.decode(data)
if(self.__userManager):
self.__userManager.UpdateUserFromLCM(msg)
#HACK TESTING...
# botControl = bot_control_command_t()
# botControl.name = "GIRR"
# botControl.botTreadVelLeft = 0
# botControl.botTreadVelLeft = 0
# if msg.kinect.is_lhandclosed and msg.kinect.is_rhandclosed:
# botControl.botTreadVelLeft = 1.0
# botControl.botTreadVelright = 1.0
# else:
# if msg.kinect.is_lhandclosed:
# print "---Left Hand CLosed!"
# botControl.botTreadVelLeft = 1.0
# botControl.botTreadVelright = -1.0
# if msg.kinect.is_rhandclosed:
# print "---Right Hand CLosed!"
# botControl.botTreadVelLeft = -1.0
# botControl.botTreadVelright = 1.0
# botControl.isInfraredOn = 0
# botControl.isLightsOn = 0
# botControl.timestamp = 0
# self.lc.publish("ARNerve_Bot_Control_GIRR", botControl.encode())
def UpdateFromRoleResponse(self, channel, data):
'''
Get the role response, parse it, and send it to the role manager
'''
roleResponse = role_response_t.decode(data)
# Now pass it to the role manager
self.__roleManager.ParseRoleResponse(roleResponse)
def UpdateBot(self, channel, data):
'''
Update from a bot frame
'''
botUpdate = bot_update_t.decode(data)
print "[LCMManager] Got an update for bot {0}".format(botUpdate.name)
self.__botManager.UpdateBotFromLCM(botUpdate)
return
def Update(self):
self.lc.handle()
def Disconnect(self):
for subscription in self.__subscriptions:
self.lc.unsubscribe(subscription)
def SendRoleRequest(self, lcmRoleRequest):
'''
Send a role change request to the UserHerder
'''
self.lc.publish("ARNerve_UserHerder_RoleRequests", lcmRoleRequest) | mit | -770,272,538,006,643,300 | 32.787879 | 119 | 0.609151 | false |
JelteF/algorithms | 2/graph.py | 1 | 3463 | import random
import time
def create_random_graph(nodes):
""" Creates a random (directed) graph with the given number of nodes """
graph = []
for i in range(0, nodes):
graph.append([])
for j in range(0, nodes):
rand = random.randint(1, 100)
if rand % 2 == 0 and i != j:
graph[i].append(rand)
else:
graph[i].append(-1)
return graph
def shortest_path_simple(graph, start, end, path=[], length=0, steplength=0):
""" Finds the shortest path in a weighted directed graph recursively """
path = path + [start]
length = length + steplength
if start == end:
return path, length
if start > len(graph) - 1:
return None
shortest = None
shortest_length = None
# Iterate through all neighbours
for i in range(0, len(graph[start])):
if i not in path and graph[start][i] > 0:
# Recursively find a new path from neighbour
newpath, newlength = shortest_path_simple(graph, i, end, path,
length, graph[start][i])
if newpath:
if not shortest_length or newlength < shortest_length:
shortest = newpath
shortest_length = newlength
return shortest, shortest_length
def shortest_path_dijkstra(graph, start, end):
""" Find the shortest path using Dijkstra's Algorithm """
previous = [None] * len(graph)
distance = [None] * len(graph)
visited = [False] * len(graph)
distance[start] = 0
while True:
# Find the unvisited node with the smallest distance
current = get_smallest_entry(visited, distance)
if current is None:
# No path found
return None, None
if current is end:
path = [current]
while current is not start:
# Reconstruct path
path.append(previous[current])
current = previous[current]
return path[::-1], distance[end]
for i in range(0, len(graph[current])):
# Graph[current][i] contains the weight of the edge to i
if graph[current][i] > 0 and not visited[i]:
this_distance = distance[current] + graph[current][i]
if distance[i] is None or this_distance < distance[i]:
distance[i] = this_distance
previous[i] = current
visited[current] = True
def get_smallest_entry(visited, distance):
""" Returns the position of the unvisited node with the smallest
distance. Returns None if no options are left. """
smallest = None
smallest_entry = None
for i in range(0, len(visited)):
if not visited[i] and distance[i] is not None:
if distance[i] < smallest or smallest is None:
smallest_entry = i
smallest = distance[i]
return smallest_entry
if __name__ == '__main__':
graph = create_random_graph(16)
start = time.clock()
path, length = shortest_path_simple(graph, 0, 4)
end = (time.clock() - start)
print "Recursive algorithm found path %s with length %d in %f seconds" % \
(path, length, end)
start = time.clock()
path, length = shortest_path_dijkstra(graph, 0, 4)
end = (time.clock() - start)
print "Dijkstra's algorithm found path %s with length %d in %f seconds" % \
(path, length, end)
| mit | 7,875,017,428,438,846,000 | 31.064815 | 79 | 0.575801 | false |
1upon0/rfid-auth-system | GUI/printer/Pillow-2.7.0/Tests/test_format_hsv.py | 1 | 5636 | from helper import unittest, PillowTestCase, hopper
from PIL import Image
import colorsys
import itertools
class TestFormatHSV(PillowTestCase):
def int_to_float(self, i):
return float(i)/255.0
def str_to_float(self, i):
return float(ord(i))/255.0
def to_int(self, f):
return int(f*255.0)
def tuple_to_ints(self, tp):
x, y, z = tp
return (int(x*255.0), int(y*255.0), int(z*255.0))
def test_sanity(self):
Image.new('HSV', (100, 100))
def wedge(self):
w = Image._wedge()
w90 = w.rotate(90)
(px, h) = w.size
r = Image.new('L', (px*3, h))
g = r.copy()
b = r.copy()
r.paste(w, (0, 0))
r.paste(w90, (px, 0))
g.paste(w90, (0, 0))
g.paste(w, (2*px, 0))
b.paste(w, (px, 0))
b.paste(w90, (2*px, 0))
img = Image.merge('RGB', (r, g, b))
# print (("%d, %d -> "% (int(1.75*px),int(.25*px))) + \
# "(%s, %s, %s)"%img.getpixel((1.75*px, .25*px)))
# print (("%d, %d -> "% (int(.75*px),int(.25*px))) + \
# "(%s, %s, %s)"%img.getpixel((.75*px, .25*px)))
return img
def to_xxx_colorsys(self, im, func, mode):
# convert the hard way using the library colorsys routines.
(r, g, b) = im.split()
if bytes is str:
conv_func = self.str_to_float
else:
conv_func = self.int_to_float
if hasattr(itertools, 'izip'):
iter_helper = itertools.izip
else:
iter_helper = itertools.zip_longest
converted = [self.tuple_to_ints(func(conv_func(_r), conv_func(_g),
conv_func(_b)))
for (_r, _g, _b) in iter_helper(r.tobytes(), g.tobytes(),
b.tobytes())]
if str is bytes:
new_bytes = b''.join(chr(h)+chr(s)+chr(v) for (
h, s, v) in converted)
else:
new_bytes = b''.join(bytes(chr(h)+chr(s)+chr(v), 'latin-1') for (
h, s, v) in converted)
hsv = Image.frombytes(mode, r.size, new_bytes)
return hsv
def to_hsv_colorsys(self, im):
return self.to_xxx_colorsys(im, colorsys.rgb_to_hsv, 'HSV')
def to_rgb_colorsys(self, im):
return self.to_xxx_colorsys(im, colorsys.hsv_to_rgb, 'RGB')
def test_wedge(self):
src = self.wedge().resize((3*32, 32), Image.BILINEAR)
im = src.convert('HSV')
comparable = self.to_hsv_colorsys(src)
# print (im.getpixel((448, 64)))
# print (comparable.getpixel((448, 64)))
# print(im.split()[0].histogram())
# print(comparable.split()[0].histogram())
# im.split()[0].show()
# comparable.split()[0].show()
self.assert_image_similar(im.split()[0], comparable.split()[0],
1, "Hue conversion is wrong")
self.assert_image_similar(im.split()[1], comparable.split()[1],
1, "Saturation conversion is wrong")
self.assert_image_similar(im.split()[2], comparable.split()[2],
1, "Value conversion is wrong")
# print (im.getpixel((192, 64)))
comparable = src
im = im.convert('RGB')
# im.split()[0].show()
# comparable.split()[0].show()
# print (im.getpixel((192, 64)))
# print (comparable.getpixel((192, 64)))
self.assert_image_similar(im.split()[0], comparable.split()[0],
3, "R conversion is wrong")
self.assert_image_similar(im.split()[1], comparable.split()[1],
3, "G conversion is wrong")
self.assert_image_similar(im.split()[2], comparable.split()[2],
3, "B conversion is wrong")
def test_convert(self):
im = hopper('RGB').convert('HSV')
comparable = self.to_hsv_colorsys(hopper('RGB'))
# print ([ord(x) for x in im.split()[0].tobytes()[:80]])
# print ([ord(x) for x in comparable.split()[0].tobytes()[:80]])
# print(im.split()[0].histogram())
# print(comparable.split()[0].histogram())
self.assert_image_similar(im.split()[0], comparable.split()[0],
1, "Hue conversion is wrong")
self.assert_image_similar(im.split()[1], comparable.split()[1],
1, "Saturation conversion is wrong")
self.assert_image_similar(im.split()[2], comparable.split()[2],
1, "Value conversion is wrong")
def test_hsv_to_rgb(self):
comparable = self.to_hsv_colorsys(hopper('RGB'))
converted = comparable.convert('RGB')
comparable = self.to_rgb_colorsys(comparable)
# print(converted.split()[1].histogram())
# print(target.split()[1].histogram())
# print ([ord(x) for x in target.split()[1].tobytes()[:80]])
# print ([ord(x) for x in converted.split()[1].tobytes()[:80]])
self.assert_image_similar(converted.split()[0], comparable.split()[0],
3, "R conversion is wrong")
self.assert_image_similar(converted.split()[1], comparable.split()[1],
3, "G conversion is wrong")
self.assert_image_similar(converted.split()[2], comparable.split()[2],
3, "B conversion is wrong")
if __name__ == '__main__':
unittest.main()
# End of file
| apache-2.0 | 8,927,930,943,546,535,000 | 32.349112 | 78 | 0.503549 | false |
mollyproject/mollyproject | molly/apps/places/providers/tfl.py | 1 | 3958 | from urllib2 import urlopen
from xml.dom import minidom
from collections import defaultdict
import threading
import logging
from django.utils.translation import ugettext_lazy as _
from molly.apps.places.providers import BaseMapsProvider
logger = logging.getLogger(__name__)
class TubeRealtimeProvider(BaseMapsProvider):
"""
Populates tube station entities with real-time departure information
"""
TRACKERNET_STATUS_URL = 'http://cloud.tfl.gov.uk/TrackerNet/StationStatus'
TRACKERNET_PREDICTION_URL = 'http://cloud.tfl.gov.uk/TrackerNet/PredictionDetailed/%s/%s'
def get_statuses(self):
statuses = {}
xml = minidom.parseString(urlopen(self.TRACKERNET_STATUS_URL).read())
for stationstatus in xml.getElementsByTagName('StationStatus'):
name = stationstatus.getElementsByTagName('Station')[0].getAttribute('Name')
status = stationstatus.getElementsByTagName('Status')[0].getAttribute('Description')
status += ' ' + stationstatus.getAttribute('StatusDetails')
statuses[name] = status
return statuses
def augment_metadata(self, entities, **kwargs):
threads = []
for entity in filter(lambda e: e.primary_type.slug == 'tube-station', entities):
# Try and match up entity with StationStatus name
for station, status in self.get_statuses().items():
if entity.title.startswith(station):
entity.metadata['real_time_information'] = {
'pip_info': [status] if status != 'Open ' else [],
}
if 'real_time_information' not in entity.metadata:
entity.metadata['real_time_information'] = {}
if 'london-underground-identifiers' in entity.metadata:
thread = threading.Thread(target=self.get_times, args=[entity])
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def get_times(self, entity):
try:
services = []
station = entity.metadata['london-underground-identifiers']['station-code']
for line in entity.metadata['london-underground-identifiers']['line-codes']:
xml = minidom.parseString(urlopen(self.TRACKERNET_PREDICTION_URL % (line, station)).read())
for platform in xml.getElementsByTagName('P'):
next_info = defaultdict(list)
for tag in platform.getElementsByTagName('T'):
dest = '%s (%s)' % (
tag.getAttribute('Destination'),
xml.getElementsByTagName('LineName')[0].childNodes[0].nodeValue
)
next_info[dest].append(int(tag.getAttribute('SecondsTo')))
for dest, eta in next_info.items():
services.append({
'service': _('Plat %s') % platform.getAttribute('Num'),
'destination': dest,
'etas': eta
})
services.sort(key=lambda s: s['etas'][0])
for service in services:
etas = [round(e/60) for e in service['etas']]
# Translators: This refers to arrival times of trains, in minutes
etas = [_('DUE') if e == 0 else _('%d mins') % e for e in etas]
service['next'] = etas[0]
service['following'] = etas[1:]
del service['etas']
entity.metadata['real_time_information']['services'] = services
entity.metadata['meta_refresh'] = 30
except Exception as e:
logger.exception('Failed to get RTI from Trackernet')
| apache-2.0 | -4,033,144,124,539,551,000 | 42.977778 | 107 | 0.55331 | false |
skosukhin/spack | var/spack/repos/builtin/packages/r-affycompatible/package.py | 1 | 2149 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffycompatible(RPackage):
"""This package provides an interface to Affymetrix chip annotation
and sample attribute files. The package allows an easy way for users
to download and manage local data bases of Affynmetrix NetAffx
annotation files. The package also provides access to GeneChip
Operating System (GCOS) and GeneChip Command Console
(AGCC)-compatible sample annotation files."""
homepage = "https://www.bioconductor.org/packages/AffyCompatible/"
url = "https://git.bioconductor.org/packages/AffyCompatible"
version('1.36.0', 'https://git.bioconductor.org/packages/AffyCompatible', commit='dbbfd43a54ae1de6173336683a9461084ebf38c3')
depends_on('[email protected]:3.4.9', when=('@1.36.0'))
depends_on('r-xml', type=('build', 'run'))
depends_on('r-rcurl', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
| lgpl-2.1 | 8,644,395,620,993,898,000 | 47.840909 | 128 | 0.689158 | false |
ryankanno/py-utilities | tests/time/test_date_utilities.py | 1 | 1831 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
from datetime import timedelta
from nose.tools import ok_
from py_utilities.time.date_utilities import EPOCH_AS_STRUCT_TIME
from py_utilities.time.date_utilities import EPOCH_AS_DATETIME
from py_utilities.time.date_utilities import next_day
from py_utilities.time.date_utilities import random_datetime
import pytz
import time
import unittest
class TestDateUtilities(unittest.TestCase):
def test_epoch_as_struct_time(self):
ok_(EPOCH_AS_STRUCT_TIME == time.gmtime(0))
def test_epoch_as_datetime(self):
ok_(EPOCH_AS_DATETIME.year == 1970)
def test_next_day_same_week_where_day_hasnt_passed(self):
# epoch is Thursday, January 1, 1970
saturday = next_day(EPOCH_AS_DATETIME, 6)
ok_(saturday.day == 3)
ok_(saturday.year == 1970)
ok_(saturday.month == 1)
def test_next_day_next_week_where_day_has_passed(self):
# epoch is Thursday, January 1, 1970
next_wednesday = next_day(EPOCH_AS_DATETIME, 3)
ok_(next_wednesday.day == 7)
ok_(next_wednesday.year == 1970)
ok_(next_wednesday.month == 1)
def test_random_datetime_with_utc_tz(self):
for x in xrange(1000):
x += 1
start_datetime = pytz.utc.localize(EPOCH_AS_DATETIME)
start_timestamp = calendar.timegm(start_datetime.utctimetuple())
end_datetime = pytz.utc.localize(EPOCH_AS_DATETIME +
timedelta(days=x))
end_timestamp = calendar.timegm(end_datetime.utctimetuple())
random = random_datetime(start_timestamp, end_timestamp,
pytz.utc)
ok_(random >= start_datetime)
ok_(end_datetime >= random)
# vim: filetype=apython
| mit | -4,479,779,889,592,963,000 | 34.901961 | 76 | 0.631349 | false |
endlessm/chromium-browser | third_party/llvm/lldb/test/API/functionalities/avoids-fd-leak/TestFdLeak.py | 1 | 4011 | """
Test whether a process started by lldb has no extra file descriptors open.
"""
import lldb
from lldbsuite.test import lldbutil
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
def python_leaky_fd_version(test):
import sys
# Python random module leaks file descriptors on some versions.
if sys.version_info >= (2, 7, 8) and sys.version_info < (2, 7, 10):
return "Python random module leaks file descriptors in this python version"
return None
class AvoidsFdLeakTestCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
@expectedFailure(python_leaky_fd_version, "bugs.freebsd.org/197376")
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr25624 still failing with Python 2.7.10")
# The check for descriptor leakage needs to be implemented differently
# here.
@skipIfWindows
@skipIfTargetAndroid() # Android have some other file descriptors open by the shell
@skipIfDarwinEmbedded # <rdar://problem/33888742> # debugserver on ios has an extra fd open on launch
def test_fd_leak_basic(self):
self.do_test([])
@expectedFailure(python_leaky_fd_version, "bugs.freebsd.org/197376")
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr25624 still failing with Python 2.7.10")
# The check for descriptor leakage needs to be implemented differently
# here.
@skipIfWindows
@skipIfTargetAndroid() # Android have some other file descriptors open by the shell
@skipIfDarwinEmbedded # <rdar://problem/33888742> # debugserver on ios has an extra fd open on launch
def test_fd_leak_log(self):
self.do_test(["log enable -f '/dev/null' lldb commands"])
def do_test(self, commands):
self.build()
exe = self.getBuildArtifact("a.out")
for c in commands:
self.runCmd(c)
target = self.dbg.CreateTarget(exe)
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
self.assertTrue(
process.GetState() == lldb.eStateExited,
"Process should have exited.")
self.assertTrue(
process.GetExitStatus() == 0,
"Process returned non-zero status. Were incorrect file descriptors passed?")
@expectedFailure(python_leaky_fd_version, "bugs.freebsd.org/197376")
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr25624 still failing with Python 2.7.10")
# The check for descriptor leakage needs to be implemented differently
# here.
@skipIfWindows
@skipIfTargetAndroid() # Android have some other file descriptors open by the shell
@skipIfDarwinEmbedded # <rdar://problem/33888742> # debugserver on ios has an extra fd open on launch
def test_fd_leak_multitarget(self):
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
breakpoint = target.BreakpointCreateBySourceRegex(
'Set breakpoint here', lldb.SBFileSpec("main.c", False))
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process1 = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process1, PROCESS_IS_VALID)
self.assertTrue(
process1.GetState() == lldb.eStateStopped,
"Process should have been stopped.")
target2 = self.dbg.CreateTarget(exe)
process2 = target2.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process2, PROCESS_IS_VALID)
self.assertTrue(
process2.GetState() == lldb.eStateExited,
"Process should have exited.")
self.assertTrue(
process2.GetExitStatus() == 0,
"Process returned non-zero status. Were incorrect file descriptors passed?")
| bsd-3-clause | -3,330,200,459,424,116,000 | 36.839623 | 106 | 0.667913 | false |
jwlin/web-crawler-tutorial | ch4/google_finance_api.py | 1 | 2251 | import requests
import json
from datetime import datetime, timedelta
def get_stock(query):
# query 可以是多支股票, 如 TPE:2330,TPE:2498, 不同股票以 , 分開
resp = requests.get('http://finance.google.com/finance/info?client=ig&q=' + query)
if resp.status_code == 200:
# 移除回傳資料開頭的 //
# 剩下的資料是一個 list of dict, 每個 dict 是一支股票的資訊
return json.loads(resp.text.replace('//', ''))
else:
return None
def get_stock_history(stock_id, stock_mkt):
resp = requests.get('http://www.google.com/finance/getprices?q=' + stock_id + '&x=' + stock_mkt + '&i=86400&p=1M')
''' e.g.,
EXCHANGE%3DTPE
MARKET_OPEN_MINUTE=540
MARKET_CLOSE_MINUTE=810
INTERVAL=86400
COLUMNS=DATE,CLOSE,HIGH,LOW,OPEN,VOLUME
DATA=
TIMEZONE_OFFSET=480
a1488346200,186,188.5,186,188.5,46176000
1,186,188.5,185,188,39914000
2,184,185,184,184.5,28085000
5,183.5,184.5,183.5,184,12527000
...
'''
index = -1
lines = resp.text.split('\n')
for line in lines:
# 'a' 開頭表示股價資訊起始列
if line.startswith('a'):
index = lines.index(line)
break
if index > 0:
lines = lines[index:]
# 找出起始行日期
unix_time = int(lines[0].split(',')[0][1:])
init_time = datetime.fromtimestamp(unix_time)
rows = list()
# 處理第一列
first_row = lines[0].split(',')
first_row[0] = init_time
rows.append(first_row)
# 處理剩餘列
for l in lines[1:]:
if l:
row = l.split(',')
delta = int(row[0])
row[0] = init_time + timedelta(days=delta)
rows.append(row)
return rows
else:
return None
if __name__ == '__main__':
query = 'TPE:2330'
print(query, '即時股價')
stocks = get_stock(query)
print(stocks[0])
print('-----')
stock_id, stock_mkt = '2330', 'TPE'
print(stock_mkt, stock_id, '歷史股價 (Date, Close, High, Low, Open, Volume)')
rows = get_stock_history('2330', 'TPE')
for row in rows:
print(row[0].strftime("%Y/%m/%d"), row[1:])
| mit | -177,420,440,146,556,900 | 28.097222 | 118 | 0.556086 | false |
devilry/devilry-django | devilry/devilry_group/feedbackfeed_builder/feedbackfeed_sidebarbuilder.py | 1 | 1342 | # -*- coding: utf-8 -*-
# Devilry imports
from devilry.devilry_comment.models import CommentFile
from devilry.devilry_group.feedbackfeed_builder import builder_base
from devilry.devilry_group import models as group_models
class FeedbackFeedSidebarBuilder(builder_base.FeedbackFeedBuilderBase):
def __init__(self, **kwargs):
super(FeedbackFeedSidebarBuilder, self).__init__(**kwargs)
self.feedbackset_dict = {}
def __get_files_for_comment(self, comment):
commentfiles = comment.commentfile_set.all()
commentfilelist = []
for commentfile in commentfiles:
commentfilelist.append(commentfile)
return commentfilelist
def build(self):
for feedbackset in self.feedbacksets:
self.feedbackset_dict[feedbackset.created_datetime] = {
'feedbackset_num': 0,
'feedbackset': feedbackset
}
self.feedbackset_dict = self.sort_dict(self.feedbackset_dict)
def get_as_list(self):
feedbackset_list = []
num = 1
for key_datetime in sorted(self.feedbackset_dict.keys()):
feedbacksets = self.feedbackset_dict[key_datetime]
feedbacksets['feedbackset_num'] = num
feedbackset_list.append(feedbacksets)
num += 1
return feedbackset_list
| bsd-3-clause | -1,378,049,649,158,134,000 | 34.315789 | 71 | 0.651267 | false |
bbengfort/cloudscope | cloudscope/console/commands/modify.py | 1 | 12414 | # cloudscope.console.commands.modify
# Modifies topologies in place for deploying to alternative sites.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Fri Aug 12 11:36:41 2016 -0400
#
# Copyright (C) 2016 University of Maryland
# For license information, see LICENSE.txt
#
# ID: modify.py [] [email protected] $
"""
Modifies topologies in place for deploying to alternative sites.
The original version of this script resets local paths for the traces and
modifies local and wide area latency for nodes.
"""
##########################################################################
## Imports
##########################################################################
import os
import json
import argparse
import warnings
from commis import Command
from commis.exceptions import ConsoleError
from cloudscope.experiment import compute_tick
##########################################################################
## Key/Value Type
##########################################################################
def keyval(string):
"""
Parses a key/value pair from the command line.
"""
pairs = [
map(lambda s: s.strip(), pair.split("="))
for pair in string.split("&")
]
if not all([len(pair) == 2 for pair in pairs]):
raise argparse.ArgumentTypeError(
"Must pass key/value pairs as key1=value1&key2=value2"
)
return dict(pairs)
##########################################################################
## Command
##########################################################################
class ModifyTopologyCommand(Command):
name = 'modify'
help = 'modifies a topology in place with new information'
args = {
'--Lm': {
"type": int,
"default": None,
"dest": "local_mean",
"help": 'modify the local area connection mean latencies',
},
'--Ls': {
"type": int,
"default": None,
"dest": "local_stddev",
"help": 'modify the local area connection latency standard deviation',
},
'--Wm': {
"type": int,
"default": None,
"dest": "wide_mean",
"help": 'modify the wide area connection mean latencies',
},
'--Ws': {
"type": int,
"default": None,
"dest": "wide_stddev",
"help": 'modify the wide area connection latency standard deviation',
},
'--sync-prob': {
"metavar": "P",
"type": float,
"default": None,
"help": "modify the sync probability of eventual nodes",
},
'--local-prob': {
"metavar": "P",
"type": float,
"default": None,
"help": "modify the select local probability of eventual nodes",
},
('-T', '--traces'): {
"metavar": "PATH",
"default": None,
"help": "specify a directory or trace to replace traces information",
},
('-M', '--meta'): {
"metavar": "KEY=VAL",
"default": None,
"type": keyval,
"help": "specify key/value pairs to modify in the meta data",
},
'topologies': {
'nargs': '+',
'metavar': 'topo.json',
'help': 'path(s) to the experiment topologies to modify',
}
}
def handle(self, args):
"""
Handles the modification of one or more topology files, collecting
information about how many edits are being made in the topology.
"""
mods = 0 # Track how many key/value pairs are being modified.
for path in args.topologies:
mods += self.modify_topology(path, args)
return "Modified {} key/value pairs in {} topologies".format(
mods, len(args.topologies)
)
def modify_topology(self, path, args):
"""
Modifies a topology in a file-like object with data input from the
command line, tracking how many changes are made at each point.
"""
# Load the topology data
with open(path, 'r') as fobj:
topo = json.load(fobj)
# Track the number of modifications
mods = 0
# If the local area parameters have been passed in, modify them.
if args.local_mean or args.local_stddev:
mods += self.modify_local_network(
topo, args.local_mean, args.local_stddev
)
# If the wide area parameters have been passed in, modify them.
if args.wide_mean or args.wide_stddev:
mods += self.modify_wide_network(
topo, args.wide_mean, args.wide_stddev
)
# If new traces have been passed in, modify it.
if args.traces:
mods += self.modify_traces(
topo, args.traces
)
# Modify Raft nodes
mods += self.modify_sequential(topo, args)
# Modify Eventual nodes
mods += self.modify_eventual(topo, args)
# Modify the meta data with the new information.
mods += self.modify_meta_info(topo, args)
# Dump the topology that has been modified back to disk.
# TODO: should we check if we've made any modifications before this?
with open(path, 'w') as fobj:
json.dump(topo, fobj, indent=2)
return mods
def modify_local_network(self, topo, mean, stddev):
"""
Modifies local area connections according to the network mean and
standard deviation. Returns number of modifications.
"""
# Modifications
mods = 0
# Must supply both the mean and the stddev
if not mean or not stddev:
raise ConsoleError(
"Must supply both the local mean and local standard deviation!"
)
# Modify the local links only!
for link in topo['links']:
if link['area'] == 'local':
mods += self.update_dict_value(link, 'latency', (mean, stddev))
# Modify the meta data about local connections
mods += self.update_meta_param(topo, 'local_latency', (mean, stddev))
return mods
def modify_wide_network(self, topo, mean, stddev):
"""
Modifies wide area connections according to the network mean and
standard deviation. This function will also update timing parameters
of the nodes according to the tick; it will also necessarily update
some of the meta information. Returns number of modifications.
"""
# Modifications
mods = 0
# Must supply both the mean and the stddev
if not mean or not stddev:
raise ConsoleError(
"Must supply both the wide mean and wide standard deviation!"
)
# Compute the tick parameter and timing params
tick_model = model=topo['meta'].get('tick_param_model', 'conservative')
T = compute_tick(mean, stddev, tick_model)
# Timing parameters for individual nodes
eto = (T, 2*T)
hbi = T/2
aed = T/4
# Modify each node's timing parameters
for node in topo['nodes']:
if 'election_timeout' in node:
mods += self.update_dict_value(node, 'election_timeout', eto)
if 'heartbeat_interval' in node:
mods += self.update_dict_value(node, 'heartbeat_interval', hbi)
if 'anti_entropy_delay' in node:
mods += self.update_dict_value(node, 'anti_entropy_delay', aed)
# Modify the wide links only!
for link in topo['links']:
if link['area'] == 'wide':
mods += self.update_dict_value(link, 'latency', (mean, stddev))
# Modify the meta data
mods += self.update_meta_param(topo, 'tick_param_model', tick_model)
mods += self.update_meta_param(topo, 'wide_latency', (mean, stddev))
mods += self.update_meta_param(topo, 'anti_entropy_delay', aed)
mods += self.update_meta_param(topo, 'election_timeout', eto)
mods += self.update_meta_param(topo, 'heartbeat_interval', hbi)
mods += self.update_meta_param(topo, 'latency_mean', mean)
mods += self.update_meta_param(topo, 'latency_stddev', stddev)
mods += self.update_meta_param(topo, 'tick_metric', T)
mods += self.update_meta_param(topo, 'variable', "{}-{}ms".format(
mean - 2*stddev, mean + 2*stddev)
)
return mods
def modify_traces(self, topo, traces):
"""
Modifies the traces inside the meta data of the topology. Returns the
number of modifications made.
"""
# Modifications
mods = 0
if os.path.isdir(traces):
# Replace the metadata trace with a new directory
name = os.path.basename(topo['meta']['trace'])
path = os.path.abspath(os.path.join(traces, name))
# Quick check to make sure the trace exists
if not os.path.exists(path):
raise ConsoleError(
"Trace at {} does not exist!".format(path)
)
mods += self.update_meta_param(topo, 'trace', path)
elif os.path.isfile(traces):
# Replace the trace with the specified file.
mods += self.update_meta_param(topo, 'trace', traces)
else:
raise ConsoleError(
"Supply either a valid directory or path to a trace!"
)
return mods
def modify_meta_info(self, topo, args):
"""
Finalizes the meta information of the topology according to any global
changes that may have been made and need to be tracked. Returns the
total number of modifications made to the topology meta info.
"""
# Modifications
mods = 0
# Modify the overall latency range
local = topo['meta'].get('local_latency', [None, None])[0]
wide = topo['meta'].get('wide_latency', [None, None])[0]
lrng = [min(local, wide), max(local, wide)]
mods += self.update_meta_param(topo, 'latency_range', lrng)
if args.meta:
for key, val in args.meta.items():
mods += self.update_meta_param(topo, key, val)
return mods
def modify_sequential(self, topo, args):
"""
Modify sequential nodes with specific policies.
For now, this method is a noop.
"""
return 0
def modify_eventual(self, topo, args):
"""
Modify eventual nodes with specific policies. This method currently:
- sets the sync probability if given (and modifies the meta)
- sets the local probability if given (and modifies the meta)
Returns the number of modifications made.
"""
mods = 0 # count the number of modifications
# Modify each node's local and sync probabilities
for node in topo['nodes']:
# Only modify eventual or stentor nodes
if node['consistency'] not in {'eventual', 'stentor'}:
continue
if args.sync_prob is not None:
mods += self.update_dict_value(node, 'sync_prob', args.sync_prob)
if args.local_prob is not None:
mods += self.update_dict_value(node, 'local_prob', args.local_prob)
# Modify the meta information
if args.sync_prob is not None:
mods += self.update_meta_param(topo, 'sync_prob', args.sync_prob)
if args.local_prob is not None:
mods += self.update_meta_param(topo, 'local_prob', args.local_prob)
return mods
def update_dict_value(self, item, key, value):
"""
Updates a value in the dictionary if the supplied value doesn't match
the value for that key and returns 1, otherwise returns 0.
"""
if item.get(key, None) != value:
item[key] = value
return 1
return 0
def update_meta_param(self, topo, key, value):
"""
Updates a meta data parameter if the supplied key doesn't match the
value and returns 1 otherwise returns 0.
"""
return self.update_dict_value(topo['meta'], key, value)
| mit | 4,806,752,857,086,505,000 | 33.010959 | 83 | 0.550991 | false |
keturn/txOpenBCI | txopenbci/control.py | 1 | 6185 | # -*- coding: utf-8 -*-
"""
Players:
* one who makes sure a connection to the device is open
- a stable presence in the community; everyone knows where to find them
* one who holds the connection to the device
- may come and go with the connection
* one who knows how to command the device
* one who hears what the device tells us
* those who listen, and interpret
* those who listen, and record
* those who listen, and display
"""
import os
from twisted.application.service import Service
from twisted.internet.endpoints import connectProtocol
from twisted.internet.error import ConnectionClosed
from twisted.python import log
from ._sausage import makeProtocol
from . import protocol
try:
import numpy
except ImportError, e:
numpy = None
numpy_reason = e
else:
numpy_reason = None
from .serial_endpoint import SerialPortEndpoint
def serialOpenBCI(serialPortName, reactor):
return SerialPortEndpoint(serialPortName, reactor,
baudrate=protocol.BAUD_RATE)
class DeviceSender(object):
_transport = None
def setTransport(self, transport):
self._transport = transport
def stopFlow(self):
self._transport = None
def _write(self, content):
return self._transport.write(content)
def reset(self):
self._write(protocol.CMD_RESET)
def start_stream(self):
self._write(protocol.CMD_STREAM_START)
def stop_stream(self):
log.msg("sending stop command")
self._write(protocol.CMD_STREAM_STOP)
class RawSample(object):
__slots__ = ['counter', 'eeg', 'accelerometer']
def __init__(self, counter, eeg, accelerometer):
self.counter = counter
self.eeg = eeg
self.accelerometer = accelerometer
def __hash__(self):
return hash((self.counter, self.eeg, self.accelerometer))
class DeviceReceiver(object):
currentRule = 'idle'
def __init__(self, commander):
"""
:type commander: DeviceCommander
"""
self.commander = commander
self._debugLog = None
self._sampleSubscribers = set()
def logIncoming(self, data):
if not self._debugLog:
filename = 'debug.%x.raw' % (os.getpid(),)
self._debugLog = file(filename, 'wb')
self._debugLog.write(data)
def handleResponse(self, content):
log.msg("device response:")
log.msg(content)
# sw33t hacks to capture some debug data
# log.msg("entering debug dump mode")
# self.currentRule = 'debug'
# self.sender.start_stream()
# from twisted.internet import reactor
# reactor.callLater(0.4, self.sender.stop_stream)
def handleSample(self, counter, sample):
# TODO: handle wrapping counter
# TODO: handle skipped packets
if self._sampleSubscribers:
eeg = protocol.int32From3Bytes(sample, 8, 0)
accelerometer = protocol.accelerometerFromBytes(sample, 24)
sample = RawSample(counter, eeg, accelerometer)
self._publishSample(sample)
def _publishSample(self, sample):
for listener in self._sampleSubscribers:
listener(sample)
# == Interfaces for subscribers ==
def subscribeToSampleData(self, listener):
self._sampleSubscribers.add(listener)
# prepareParsing and finishParsing are not called from the grammar, but
# from the ParserProtocol, as connection-related events.
def prepareParsing(self, parser):
self.commander.deviceOpen()
def finishParsing(self, reason):
self.commander.deviceLost(reason)
class DeviceCommander(object):
_senderFactory = DeviceSender
_connecting = None
def __init__(self):
self.client = None
self.sender = DeviceSender()
self.receiver = DeviceReceiver(self)
self._protocolClass = makeProtocol(
protocol.grammar, self.sender, self.receiver,
name="OpenBCIDevice")
def connect(self, endpoint):
if self.client:
raise RuntimeError("Already connected to %s" % (self.client,))
if self._connecting:
raise RuntimeError("Connection already in progress.")
self._connecting = connectProtocol(endpoint, self._protocolClass())
self._connecting.addCallbacks(self._setClient, self._connectFailed)
def _setClient(self, client):
self.client = client
self._connecting = None
def _connectFailed(self, reason):
log.msg(reason.getErrorMessage())
self._connecting = None
# == Events we get from DeviceReceiver ==
def deviceOpen(self):
# Send the reset command, so we know we're starting with a predictable
# state.
self.sender.reset()
def deviceLost(self, reason):
if not reason.check(ConnectionClosed):
log.msg("Parser error: %s" % (reason.getErrorMessage(),))
log.msg(reason.getTraceback())
else:
log.msg("Receiver finished: %s" % (reason.getErrorMessage(),))
self.client = None
# == Outward-facing commands: ==
def hangUp(self):
if self.client:
self.sender.stop_stream()
self.client.transport.loseConnection()
def destroy(self):
self.hangUp()
self.client = None
if self._connecting:
self._connecting.cancel()
def startStream(self):
self.receiver.currentRule = 'sample'
self.sender.start_stream()
def stopStream(self):
self.sender.stop_stream()
# TODO: set currentRule back once stream actually ends
def reset(self):
self.sender.reset()
class DeviceService(Service):
def __init__(self, endpoint):
self.endpoint = endpoint
self.commander = DeviceCommander()
def startService(self):
log.msg("Starting service.")
if numpy_reason:
log.msg("Note: numpy is not available: %s" % (numpy_reason,))
Service.startService(self)
self.commander.connect(self.endpoint)
def stopService(self):
self.commander.destroy()
Service.stopService(self)
| apache-2.0 | -3,535,835,345,981,307,000 | 25.545064 | 78 | 0.638157 | false |
mdworks2016/work_development | Python/20_Third_Certification/venv/lib/python3.7/site-packages/celery/backends/s3.py | 1 | 2745 | # -*- coding: utf-8 -*-
"""s3 result store backend."""
from __future__ import absolute_import, unicode_literals
from kombu.utils.encoding import bytes_to_str
from celery.exceptions import ImproperlyConfigured
from .base import KeyValueStoreBackend
try:
import boto3
import botocore
except ImportError:
boto3 = None
botocore = None
__all__ = ('S3Backend',)
class S3Backend(KeyValueStoreBackend):
"""An S3 task result store.
Raises:
celery.exceptions.ImproperlyConfigured:
if module :pypi:`boto3` is not available,
if the :setting:`aws_access_key_id` or
setting:`aws_secret_access_key` are not set,
or it the :setting:`bucket` is not set.
"""
def __init__(self, **kwargs):
super(S3Backend, self).__init__(**kwargs)
if not boto3 or not botocore:
raise ImproperlyConfigured('You must install boto3'
'to use s3 backend')
conf = self.app.conf
self.endpoint_url = conf.get('s3_endpoint_url', None)
self.aws_region = conf.get('s3_region', None)
self.aws_access_key_id = conf.get('s3_access_key_id', None)
self.aws_secret_access_key = conf.get('s3_secret_access_key', None)
self.bucket_name = conf.get('s3_bucket', None)
if not self.bucket_name:
raise ImproperlyConfigured('Missing bucket name')
self.base_path = conf.get('s3_base_path', None)
self._s3_resource = self._connect_to_s3()
def _get_s3_object(self, key):
key_bucket_path = self.base_path + key if self.base_path else key
return self._s3_resource.Object(self.bucket_name, key_bucket_path)
def get(self, key):
key = bytes_to_str(key)
s3_object = self._get_s3_object(key)
try:
s3_object.load()
return s3_object.get()['Body'].read().decode('utf-8')
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] == "404":
return None
raise error
def set(self, key, value):
key = bytes_to_str(key)
s3_object = self._get_s3_object(key)
s3_object.put(Body=value)
def delete(self, key):
s3_object = self._get_s3_object(key)
s3_object.delete()
def _connect_to_s3(self):
session = boto3.Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region
)
if session.get_credentials() is None:
raise ImproperlyConfigured('Missing aws s3 creds')
return session.resource('s3', endpoint_url=self.endpoint_url)
| apache-2.0 | 759,880,483,341,247,200 | 30.551724 | 75 | 0.601821 | false |
jhartford/pybo | pybo/demos/intermediate.py | 1 | 1969 | """
Demo performing Bayesian optimization on an objective function sampled from a
Gaussian process. This script also demonstrates user-defined visualization via
a callback function that is imported from the advanced demo.
Note that in this demo we are sampling an objective function from a Gaussian
process. We are not, however, modifying the default GP used internally by
`pybo.solve_bayesopt`. The default model used within `pybo.solve_bayesopt` is a
GP with constant mean, Matern 5 kernel, and hyperparameters marginalized using
MCMC. To modify this behavior see the advanced demo.
In this demo we also explore the following additional Bayesian optimization
modules that can be user-defined:
- the initial search grid,
- the selection policy,
- the recommendation strategy, and
- composite kernels (a `pygp` feature).
"""
import numpy as np
import pygp
import pybo
# import callback from advanced demo
import os
import sys
sys.path.append(os.path.dirname(__file__))
from advanced import callback
if __name__ == '__main__':
rng = 0 # random seed
bounds = np.array([3, 5]) # bounds of search space
dim = bounds.shape[0] # dimension of space
# define a GP which we will sample an objective from.
likelihood = pygp.likelihoods.Gaussian(sigma=1e-6)
kernel = pygp.kernels.Periodic(1, 1, 0.5) + pygp.kernels.SE(1, 1)
gp = pygp.inference.ExactGP(likelihood, kernel, mean=0.0)
objective = pybo.functions.GPModel(bounds, gp, rng=rng)
info = pybo.solve_bayesopt(
objective,
bounds,
niter=30*dim,
init='latin', # initialization policy
policy='thompson', # exploration policy
recommender='observed', # recommendation policy
noisefree=True,
rng=rng,
callback=callback)
| bsd-2-clause | -4,735,991,816,232,202,000 | 37.607843 | 80 | 0.6516 | false |
DirectXMan12/nova-hacking | nova/tests/api/openstack/compute/contrib/test_flavorextradata.py | 1 | 3504 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from nova.compute import flavors
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
def fake_get_flavor_by_flavor_id(flavorid):
return {
'id': flavorid,
'flavorid': str(flavorid),
'root_gb': 1,
'ephemeral_gb': 1,
'name': u'test',
'deleted': False,
'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
'updated_at': None,
'memory_mb': 512,
'vcpus': 1,
'extra_specs': {},
'deleted_at': None,
'vcpu_weight': None,
}
def fake_get_all_flavors(inactive=0, filters=None):
return {
'fake1': fake_get_flavor_by_flavor_id(1),
'fake2': fake_get_flavor_by_flavor_id(2)
}
class FlavorextradataTest(test.TestCase):
def setUp(self):
super(FlavorextradataTest, self).setUp()
ext = ('nova.api.openstack.compute.contrib'
'.flavorextradata.Flavorextradata')
self.flags(osapi_compute_extension=[ext])
self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
self.stubs.Set(flavors, 'get_all_flavors', fake_get_all_flavors)
def _verify_flavor_response(self, flavor, expected):
for key in expected:
self.assertEquals(flavor[key], expected[key])
def test_show(self):
expected = {
'flavor': {
'id': '1',
'name': 'test',
'ram': 512,
'vcpus': 1,
'disk': 1,
'OS-FLV-EXT-DATA:ephemeral': 1,
}
}
url = '/v2/fake/flavors/1'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(init_only=('flavors',)))
body = jsonutils.loads(res.body)
self._verify_flavor_response(body['flavor'], expected['flavor'])
def test_detail(self):
expected = [
{
'id': '1',
'name': 'test',
'ram': 512,
'vcpus': 1,
'disk': 1,
'OS-FLV-EXT-DATA:ephemeral': 1,
},
{
'id': '2',
'name': 'test',
'ram': 512,
'vcpus': 1,
'disk': 1,
'OS-FLV-EXT-DATA:ephemeral': 1,
},
]
url = '/v2/fake/flavors/detail'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(init_only=('flavors',)))
body = jsonutils.loads(res.body)
for i, flavor in enumerate(body['flavors']):
self._verify_flavor_response(flavor, expected[i])
| apache-2.0 | 6,486,008,720,021,884,000 | 30.854545 | 78 | 0.548801 | false |
adongy/adminradius | admin_radius/models.py | 1 | 4418 | from django.db import models
from .raw_models import *
from django.core.urlresolvers import reverse
import datetime
from django.core.exceptions import ValidationError
class RadPassManager(models.Manager):
def get_queryset(self):
return super(RadPassManager, self).get_queryset().filter(attribute='NT-Password', op=':=')
class RadStartDateManager(models.Manager):
def get_queryset(self):
return super(RadStartDateManager, self).get_queryset().filter(attribute='User-Start-Date', op=':=')
class RadEndDateManager(models.Manager):
def get_queryset(self):
return super(RadEndDateManager, self).get_queryset().filter(attribute='User-End-Date', op=':=')
class RadPass(Radcheck):
objects = RadPassManager()
def __init__(self, *args, **kwargs):
self._meta.get_field('attribute').default = 'NT-Password'
self._meta.get_field('op').default = ':='
super(RadPass, self).__init__(*args, **kwargs)
def clean_fields(self, exclude=None):
super(RadPass, self).clean_fields(exclude)
if self.value and len(self.value) != 32:
raise ValidationError(_("Hash is incorrectly formatted. Input as a 32 hexadecimal character string without a leading '0x' prefix."))
class Meta:
proxy = True
class RadStartDate(Radcheck):
objects = RadStartDateManager()
def __init__(self, *args, **kwargs):
self._meta.get_field('attribute').default = 'User-Start-Date'
self._meta.get_field('op').default = ':='
super(RadStartDate, self).__init__(*args, **kwargs)
def clean_fields(self, exclude=None):
super(RadStartDate, self).clean_fields(exclude)
if self.value:
try:
datetime.datetime.strptime(self.value, '%Y%m%d')
except ValueError:
raise ValidationError(_("Input date is not formatted as YYYYMMDD."))
def get_date(self):
if self.value:
return datetime.datetime.strptime(self.value, '%Y%m%d')
else:
return None
def get_absolute_url(self):
return reverse('admin_radius:user_edit', args=(self.username,))
class Meta:
proxy = True
class RadEndDate(Radcheck):
objects = RadEndDateManager()
def __init__(self, *args, **kwargs):
self._meta.get_field('attribute').default = 'User-End-Date'
self._meta.get_field('op').default = ':='
super(RadEndDate, self).__init__(*args, **kwargs)
def clean_fields(self, exclude=None):
super(RadEndDate, self).clean_fields(exclude)
if self.value:
try:
datetime.datetime.strptime(self.value, '%Y%m%d')
except ValueError:
raise ValidationError(_("Input date is not formatted as YYYYMMDD."))
def get_date(self):
if self.value:
return datetime.datetime.strptime(self.value, '%Y%m%d')
else:
return None
def get_absolute_url(self):
return reverse('admin_radius:user_edit', args=(self.username,))
class Meta:
proxy = True
class RadUser(models.Model):
username = models.CharField(max_length=64, unique=True)
start_date = models.OneToOneField(RadStartDate)
end_date = models.OneToOneField(RadEndDate)
password = models.OneToOneField(RadPass, blank=True, null=True)
@property
def is_online(self):
return Radacct.objects.filter(
username=self.username,
acctstoptime=None).exists()
"""
def clean(self):
# username must be consistent
if self.start_date and self.username and self.start_date.username != self.username:
raise ValidationError({'start_date': _('Usernames do not match.')})
if self.end_date and self.username and self.end_date.username != self.username:
raise ValidationError({'end_date': _('Usernames do not match.')})
if self.password and self.username and self.password.username != self.username:
raise ValidationError({'password': _('Usernames do not match.')})
"""
def get_absolute_url(self):
return reverse('admin_radius:user_edit', args=(self.username,))
def __str__(self):
return "<Raduser {}>".format(self.username) | mit | 73,345,521,450,151,820 | 36.449153 | 144 | 0.611815 | false |
YuxuanLing/trunk | trunk/code/study/python/Fluent-Python-example-code/21-class-metaprog/bulkfood/model_v8.py | 1 | 2193 | import abc
import collections
class AutoStorage:
__counter = 0
def __init__(self):
cls = self.__class__
prefix = cls.__name__
index = cls.__counter
self.storage_name = '_{}#{}'.format(prefix, index)
cls.__counter += 1
def __get__(self, instance, owner):
if instance is None:
return self
else:
return getattr(instance, self.storage_name)
def __set__(self, instance, value):
setattr(instance, self.storage_name, value)
class Validated(abc.ABC, AutoStorage):
def __set__(self, instance, value):
value = self.validate(instance, value)
super().__set__(instance, value)
@abc.abstractmethod
def validate(self, instance, value):
"""return validated value or raise ValueError"""
class Quantity(Validated):
"""a number greater than zero"""
def validate(self, instance, value):
if value <= 0:
raise ValueError('value must be > 0')
return value
class NonBlank(Validated):
"""a string with at least one non-space character"""
def validate(self, instance, value):
value = value.strip()
if len(value) == 0:
raise ValueError('value cannot be empty or blank')
return value
# BEGIN MODEL_V8
class EntityMeta(type):
"""Metaclass for business entities with validated fields"""
@classmethod
def __prepare__(cls, name, bases):
return collections.OrderedDict() # <1>
def __init__(cls, name, bases, attr_dict):
super().__init__(name, bases, attr_dict)
cls._field_names = [] # <2>
for key, attr in attr_dict.items(): # <3>
if isinstance(attr, Validated):
type_name = type(attr).__name__
attr.storage_name = '_{}#{}'.format(type_name, key)
cls._field_names.append(key) # <4>
class Entity(metaclass=EntityMeta):
"""Business entity with validated fields"""
@classmethod
def field_names(cls): # <5>
for name in cls._field_names:
yield name
# END MODEL_V8
| gpl-3.0 | 8,952,538,520,680,736,000 | 25.4125 | 67 | 0.559052 | false |
diogo149/dooML | loss.py | 1 | 2707 | """Table of Contents
-modified_huber
-hinge
-squared_hinge
-log
-squared
-huber
-epsilon_insensitive
-squared_epislon_insensitive
-alpha_huber
-absolute
"""
import numpy as np
def modified_huber(p, y):
"""Modified Huber loss for binary classification with y in {-1, 1}; equivalent to quadratically smoothed SVM with gamma = 2
"""
z = p * y
loss = -4.0 * z
idx = z >= -1.0
loss[idx] = (z[idx] - 1.0) ** 2
loss[z >= 1.0] = 0.0
return loss
def hinge(p, y, threshold=1.0):
"""Hinge loss for binary classification tasks with y in {-1,1}
Parameters
----------
threshold : float > 0.0
Margin threshold. When threshold=1.0, one gets the loss used by SVM.
When threshold=0.0, one gets the loss used by the Perceptron.
"""
z = p * y
loss = threshold - z
loss[loss < 0] = 0.0
return loss
def squared_hinge(p, y, threshold=1.0):
"""Squared Hinge loss for binary classification tasks with y in {-1,1}
Parameters
----------
threshold : float > 0.0
Margin threshold. When threshold=1.0, one gets the loss used by
(quadratically penalized) SVM.
"""
return hinge(p, y, threshold) ** 2
def log(p, y):
"""Logistic regression loss for binary classification with y in {-1, 1}"""
z = p * y
return np.log(1.0 + np.exp(-z))
def squared(p, y):
"""Squared loss traditional used in linear regression."""
return 0.5 * (p - y) ** 2
def huber(p, y, epsilon=0.1):
"""Huber regression loss
Variant of the SquaredLoss that is robust to outliers (quadratic near zero,
linear in for large errors).
http://en.wikipedia.org/wiki/Huber_Loss_Function
"""
abs_r = np.abs(p - y)
loss = 0.5 * abs_r ** 2
idx = abs_r <= epsilon
loss[idx] = epsilon * abs_r[idx] - 0.5 * epsilon ** 2
return loss
def epsilon_insensitive(p, y, epsilon=0.1):
"""Epsilon-Insensitive loss (used by SVR).
loss = max(0, |y - p| - epsilon)
"""
loss = np.abs(y - p) - epsilon
loss[loss < 0.0] = 0.0
return loss
def squared_epislon_insensitive(p, y, epsilon=0.1):
"""Epsilon-Insensitive loss.
loss = max(0, |y - p| - epsilon)^2
"""
return epsilon_insensitive(p, y, epsilon) ** 2
def alpha_huber(p, y, alpha=0.9):
""" sets the epislon in huber loss equal to a percentile of the residuals
"""
abs_r = np.abs(p - y)
loss = 0.5 * abs_r ** 2
epsilon = np.percentile(loss, alpha * 100)
idx = abs_r <= epsilon
loss[idx] = epsilon * abs_r[idx] - 0.5 * epsilon ** 2
return loss
def absolute(p, y):
""" absolute value of loss
"""
return np.abs(p - y)
| gpl-3.0 | 2,330,747,692,834,036,700 | 22.53913 | 127 | 0.586997 | false |
DiamondLightSource/diffcalc | diffcalc/hkl/vlieg/calc.py | 1 | 32658 | ###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
from math import pi, asin, acos, sin, cos, sqrt, atan2, fabs, atan
from diffcalc import settings
try:
from numpy import matrix
from numpy.linalg import norm
except ImportError:
from numjy import matrix
from numjy.linalg import norm
from diffcalc.hkl.calcbase import HklCalculatorBase
from diffcalc.hkl.vlieg.transform import TransformCInRadians
from diffcalc.util import dot3, cross3, bound, differ
from diffcalc.hkl.vlieg.geometry import createVliegMatrices, \
createVliegsPsiTransformationMatrix, \
createVliegsSurfaceTransformationMatrices, calcPHI
from diffcalc.hkl.vlieg.geometry import VliegPosition
from diffcalc.hkl.vlieg.constraints import VliegParameterManager
from diffcalc.hkl.vlieg.constraints import ModeSelector
from diffcalc.ub.calc import PaperSpecificUbCalcStrategy
TORAD = pi / 180
TODEG = 180 / pi
transformC = TransformCInRadians()
PREFER_POSITIVE_CHI_SOLUTIONS = True
I = matrix('1 0 0; 0 1 0; 0 0 1')
y = matrix('0; 1; 0')
def check(condition, ErrorOrStringOrCallable, *args):
"""
    fail = check(condition, ErrorOrStringOrCallable, *args) -- if condition is
    false, raises the exception passed in, or creates one from a string. If a
    callable is passed in, it is called with any args specified and check
    returns False.
"""
# TODO: Remove (really nasty) check function
if condition == False:
if callable(ErrorOrStringOrCallable):
ErrorOrStringOrCallable(*args)
return False
elif isinstance(ErrorOrStringOrCallable, str):
raise Exception(ErrorOrStringOrCallable)
else: # assume input is an exception
raise ErrorOrStringOrCallable
return True
def sign(x):
if x < 0:
return -1
else:
return 1
def vliegAnglesToHkl(pos, wavelength, UBMatrix):
"""
    Returns hkl indices from a VliegPosition object (angles in radians),
    wavelength in Angstroms and a UB matrix.
"""
wavevector = 2 * pi / wavelength
# Create transformation matrices
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
pos.alpha, pos.delta, pos.gamma, pos.omega, pos.chi, pos.phi)
# Create the plane normal vector in the alpha axis coordinate frame
qa = ((DELTA * GAMMA) - ALPHA.I) * matrix([[0], [wavevector], [0]])
    # Transform the plane normal vector from the alpha frame to the
    # reciprocal lattice frame.
hkl = UBMatrix.I * PHI.I * CHI.I * OMEGA.I * qa
return hkl[0, 0], hkl[1, 0], hkl[2, 0]
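# --- Illustrative usage (editor's sketch, not part of the original module).
# The angle values and identity UB matrix below are hypothetical; angles are
# given in radians in the order (alpha, delta, gamma, omega, chi, phi) and the
# wavelength in Angstroms.
def _example_vlieg_angles_to_hkl():
    pos = VliegPosition(0.1, 0.5, 0.0, 0.2, 0.3, 0.0)
    identity_ub = matrix('1 0 0; 0 1 0; 0 0 1')
    return vliegAnglesToHkl(pos, 1.0, identity_ub)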
class VliegUbCalcStrategy(PaperSpecificUbCalcStrategy):
def calculate_q_phi(self, pos):
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
pos.alpha, pos.delta, pos.gamma, pos.omega, pos.chi, pos.phi)
u1a = (DELTA * GAMMA - ALPHA.I) * y
u1p = PHI.I * CHI.I * OMEGA.I * u1a
return u1p
class VliegHklCalculator(HklCalculatorBase):
def __init__(self, ubcalc,
raiseExceptionsIfAnglesDoNotMapBackToHkl=True):
r = raiseExceptionsIfAnglesDoNotMapBackToHkl
HklCalculatorBase.__init__(self, ubcalc,
raiseExceptionsIfAnglesDoNotMapBackToHkl=r)
self._gammaParameterName = ({'arm': 'gamma', 'base': 'oopgamma'}
[settings.geometry.gamma_location])
self.mode_selector = ModeSelector(settings.geometry, None,
self._gammaParameterName)
self.parameter_manager = VliegParameterManager(
settings.geometry, settings.hardware, self.mode_selector,
self._gammaParameterName)
self.mode_selector.setParameterManager(self.parameter_manager)
def __str__(self):
        # should list parameters and indicate which are used in selected mode
result = "Available mode_selector:\n"
result += self.mode_selector.reportAvailableModes()
result += '\nCurrent mode:\n'
result += self.mode_selector.reportCurrentMode()
result += '\n\nParameters:\n'
result += self.parameter_manager.reportAllParameters()
return result
def _anglesToHkl(self, pos, wavelength):
"""
Return hkl tuple from VliegPosition in radians and wavelength in
Angstroms.
"""
return vliegAnglesToHkl(pos, wavelength, self._getUBMatrix())
def _anglesToVirtualAngles(self, pos, wavelength):
"""
Return dictionary of all virtual angles in radians from VliegPosition
        object in radians and wavelength in Angstroms. The virtual angles are:
Bin, Bout, azimuth and 2theta.
"""
# Create transformation matrices
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
pos.alpha, pos.delta, pos.gamma, pos.omega, pos.chi, pos.phi)
[SIGMA, TAU] = createVliegsSurfaceTransformationMatrices(
self._getSigma() * TORAD, self._getTau() * TORAD)
S = TAU * SIGMA
y_vector = matrix([[0], [1], [0]])
# Calculate Bin from equation 15:
surfacenormal_alpha = OMEGA * CHI * PHI * S * matrix([[0], [0], [1]])
incoming_alpha = ALPHA.I * y_vector
minusSinBetaIn = dot3(surfacenormal_alpha, incoming_alpha)
Bin = asin(bound(-minusSinBetaIn))
# Calculate Bout from equation 16:
        # surfacenormal_alpha has just been calculated
outgoing_alpha = DELTA * GAMMA * y_vector
sinBetaOut = dot3(surfacenormal_alpha, outgoing_alpha)
Bout = asin(bound(sinBetaOut))
# Calculate 2theta from equation 25:
cosTwoTheta = dot3(ALPHA * DELTA * GAMMA * y_vector, y_vector)
twotheta = acos(bound(cosTwoTheta))
psi = self._anglesToPsi(pos, wavelength)
return {'Bin': Bin, 'Bout': Bout, 'azimuth': psi, '2theta': twotheta}
def _hklToAngles(self, h, k, l, wavelength):
"""
Return VliegPosition and virtual angles in radians from h, k & l and
wavelength in Angstroms. The virtual angles are those fixed or
generated while calculating the position: Bin, Bout and 2theta; and
azimuth in four and five circle modes.
"""
if self._getMode().group in ("fourc", "fivecFixedGamma",
"fivecFixedAlpha"):
return self._hklToAnglesFourAndFiveCirclesModes(h, k, l,
wavelength)
elif self._getMode().group == "zaxis":
return self._hklToAnglesZaxisModes(h, k, l, wavelength)
else:
raise RuntimeError(
'The current mode (%s) has an unrecognised group: %s.'
% (self._getMode().name, self._getMode().group))
def _hklToAnglesFourAndFiveCirclesModes(self, h, k, l, wavelength):
"""
Return VliegPosition and virtual angles in radians from h, k & l and
wavelength in Angstrom for four and five circle modes. The virtual
angles are those fixed or generated while calculating the position:
Bin, Bout, 2theta and azimuth.
"""
        # Results in radians during calculations and in the returned position
pos = VliegPosition(None, None, None, None, None, None)
# Normalise hkl
wavevector = 2 * pi / wavelength
hklNorm = matrix([[h], [k], [l]]) / wavevector
# Compute hkl in phi axis coordinate frame
hklPhiNorm = self._getUBMatrix() * hklNorm
# Determine Bin and Bout
if self._getMode().name == '4cPhi':
Bin = Bout = None
else:
Bin, Bout = self._determineBinAndBoutInFourAndFiveCirclesModes(
hklNorm)
# Determine alpha and gamma
if self._getMode().group == 'fourc':
pos.alpha, pos.gamma = \
self._determineAlphaAndGammaForFourCircleModes(hklPhiNorm)
else:
pos.alpha, pos.gamma = \
self._determineAlphaAndGammaForFiveCircleModes(Bin, hklPhiNorm)
if pos.alpha < -pi:
pos.alpha += 2 * pi
if pos.alpha > pi:
pos.alpha -= 2 * pi
# Determine delta
(pos.delta, twotheta) = self._determineDelta(hklPhiNorm, pos.alpha,
pos.gamma)
# Determine omega, chi & phi
pos.omega, pos.chi, pos.phi, psi = \
self._determineSampleAnglesInFourAndFiveCircleModes(
hklPhiNorm, pos.alpha, pos.delta, pos.gamma, Bin)
# (psi will be None in fixed phi mode)
# Ensure that by default omega is between -90 and 90, by possibly
# transforming the sample angles
if self._getMode().name != '4cPhi': # not in fixed-phi mode
if pos.omega < -pi / 2 or pos.omega > pi / 2:
pos = transformC.transform(pos)
# Gather up the virtual angles calculated along the way...
# -pi<psi<=pi
if psi is not None:
if psi > pi:
psi -= 2 * pi
if psi < (-1 * pi):
psi += 2 * pi
v = {'2theta': twotheta, 'Bin': Bin, 'Bout': Bout, 'azimuth': psi}
return pos, v
def _hklToAnglesZaxisModes(self, h, k, l, wavelength):
"""
Return VliegPosition and virtual angles in radians from h, k & l and
wavelength in Angstroms for z-axis modes. The virtual angles are those
fixed or generated while calculating the position: Bin, Bout, and
2theta.
"""
# Section 6:
        # Results in radians during calculations and in the returned position
pos = VliegPosition(None, None, None, None, None, None)
# Normalise hkl
wavevector = 2 * pi / wavelength
hkl = matrix([[h], [k], [l]])
hklNorm = hkl * (1.0 / wavevector)
# Compute hkl in phi axis coordinate frame
hklPhi = self._getUBMatrix() * hkl
hklPhiNorm = self._getUBMatrix() * hklNorm
# Determine Chi and Phi (Equation 29):
pos.phi = -self._getTau() * TORAD
pos.chi = -self._getSigma() * TORAD
# Equation 30:
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
None, None, None, None, pos.chi, pos.phi)
del ALPHA, DELTA, GAMMA, OMEGA
Hw = CHI * PHI * hklPhi
# Determine Bin and Bout:
(Bin, Bout) = self._determineBinAndBoutInZaxisModes(
Hw[2, 0] / wavevector)
# Determine Alpha and Gamma (Equation 32):
pos.alpha = Bin
pos.gamma = Bout
# Determine Delta:
(pos.delta, twotheta) = self._determineDelta(hklPhiNorm, pos.alpha,
pos.gamma)
# Determine Omega:
delta = pos.delta
gamma = pos.gamma
d1 = (Hw[1, 0] * sin(delta) * cos(gamma) - Hw[0, 0] *
(cos(delta) * cos(gamma) - cos(pos.alpha)))
d2 = (Hw[0, 0] * sin(delta) * cos(gamma) + Hw[1, 0] *
(cos(delta) * cos(gamma) - cos(pos.alpha)))
if fabs(d2) < 1e-30:
pos.omega = sign(d1) * sign(d2) * pi / 2.0
else:
pos.omega = atan2(d1, d2)
# Gather up the virtual angles calculated along the way
return pos, {'2theta': twotheta, 'Bin': Bin, 'Bout': Bout}
###
def _determineBinAndBoutInFourAndFiveCirclesModes(self, hklNorm):
"""(Bin, Bout) = _determineBinAndBoutInFourAndFiveCirclesModes()"""
BinModes = ('4cBin', '5cgBin', '5caBin')
BoutModes = ('4cBout', '5cgBout', '5caBout')
BeqModes = ('4cBeq', '5cgBeq', '5caBeq')
        azimuthModes = ('4cAzimuth',)
        fixedBusingAndLeviWmodes = ('4cFixedw',)
# Calculate RHS of equation 20
# RHS (1/K)(S^-1*U*B*H)_3 where H/K = hklNorm
UB = self._getUBMatrix()
[SIGMA, TAU] = createVliegsSurfaceTransformationMatrices(
self._getSigma() * TORAD, self._getTau() * TORAD)
#S = SIGMA * TAU
S = TAU * SIGMA
RHS = (S.I * UB * hklNorm)[2, 0]
if self._getMode().name in BinModes:
Bin = self._getParameter('betain')
check(Bin != None, "The parameter betain must be set for mode %s" %
self._getMode().name)
Bin = Bin * TORAD
sinBout = RHS - sin(Bin)
check(fabs(sinBout) <= 1, "Could not compute Bout")
Bout = asin(sinBout)
elif self._getMode().name in BoutModes:
Bout = self._getParameter('betaout')
check(Bout != None, "The parameter Bout must be set for mode %s" %
self._getMode().name)
Bout = Bout * TORAD
sinBin = RHS - sin(Bout)
check(fabs(sinBin) <= 1, "Could not compute Bin")
Bin = asin(sinBin)
elif self._getMode().name in BeqModes:
sinBeq = RHS / 2
check(fabs(sinBeq) <= 1, "Could not compute Bin=Bout")
Bin = Bout = asin(sinBeq)
elif self._getMode().name in azimuthModes:
azimuth = self._getParameter('azimuth')
check(azimuth != None, "The parameter azimuth must be set for "
"mode %s" % self._getMode().name)
del azimuth
# TODO: codeit
raise NotImplementedError()
elif self._getMode().name in fixedBusingAndLeviWmodes:
bandlomega = self._getParameter('blw')
            check(bandlomega != None, "The parameter blw must be set "
"for mode %s" % self._getMode().name)
del bandlomega
# TODO: codeit
raise NotImplementedError()
else:
raise RuntimeError("AngleCalculator does not know how to handle "
"mode %s" % self._getMode().name)
return (Bin, Bout)
def _determineBinAndBoutInZaxisModes(self, Hw3OverK):
"""(Bin, Bout) = _determineBinAndBoutInZaxisModes(HwOverK)"""
        BinModes = ('6czBin',)
        BoutModes = ('6czBout',)
        BeqModes = ('6czBeq',)
if self._getMode().name in BinModes:
Bin = self._getParameter('betain')
check(Bin != None, "The parameter betain must be set for mode %s" %
self._getMode().name)
Bin = Bin * TORAD
# Equation 32a:
Bout = asin(Hw3OverK - sin(Bin))
elif self._getMode().name in BoutModes:
Bout = self._getParameter('betaout')
check(Bout != None, "The parameter Bout must be set for mode %s" %
self._getMode().name)
Bout = Bout * TORAD
# Equation 32b:
Bin = asin(Hw3OverK - sin(Bout))
elif self._getMode().name in BeqModes:
# Equation 32c:
Bin = Bout = asin(Hw3OverK / 2)
return (Bin, Bout)
###
def _determineAlphaAndGammaForFourCircleModes(self, hklPhiNorm):
if self._getMode().group == 'fourc':
alpha = self._getParameter('alpha') * TORAD
gamma = self._getParameter(self._getGammaParameterName()) * TORAD
check(alpha != None, "alpha parameter must be set in fourc modes")
check(gamma != None, "gamma parameter must be set in fourc modes")
return alpha, gamma
else:
raise RuntimeError(
"determineAlphaAndGammaForFourCirclesModes() "
"is not appropriate for %s modes" % self._getMode().group)
def _determineAlphaAndGammaForFiveCircleModes(self, Bin, hklPhiNorm):
## Solve equation 34 for one possible Y, Yo
# Calculate surface normal in phi frame
[SIGMA, TAU] = createVliegsSurfaceTransformationMatrices(
self._getSigma() * TORAD, self._getTau() * TORAD)
S = TAU * SIGMA
surfaceNormalPhi = S * matrix([[0], [0], [1]])
# Compute beta in vector
BetaVector = matrix([[0], [-sin(Bin)], [cos(Bin)]])
# Find Yo
Yo = self._findMatrixToTransformAIntoB(surfaceNormalPhi, BetaVector)
## Calculate Hv from equation 39
Z = matrix([[1, 0, 0],
[0, cos(Bin), sin(Bin)],
[0, -sin(Bin), cos(Bin)]])
Hv = Z * Yo * hklPhiNorm
# Fixed gamma:
if self._getMode().group == 'fivecFixedGamma':
gamma = self._getParameter(self._getGammaParameterName())
check(gamma != None,
"gamma parameter must be set in fivecFixedGamma modes")
gamma = gamma * TORAD
H2 = (hklPhiNorm[0, 0] ** 2 + hklPhiNorm[1, 0] ** 2 +
hklPhiNorm[2, 0] ** 2)
a = -(0.5 * H2 * sin(Bin) - Hv[2, 0])
b = -(1.0 - 0.5 * H2) * cos(Bin)
c = cos(Bin) * sin(gamma)
check((b * b + a * a - c * c) >= 0, 'Could not solve for alpha')
alpha = 2 * atan2(-(b + sqrt(b * b + a * a - c * c)), -(a + c))
# Fixed Alpha:
elif self._getMode().group == 'fivecFixedAlpha':
alpha = self._getParameter('alpha')
check(alpha != None,
"alpha parameter must be set in fivecFixedAlpha modes")
alpha = alpha * TORAD
H2 = (hklPhiNorm[0, 0] ** 2 + hklPhiNorm[1, 0] ** 2 +
hklPhiNorm[2, 0] ** 2)
t0 = ((2 * cos(alpha) * Hv[2, 0] - sin(Bin) * cos(alpha) * H2 +
cos(Bin) * sin(alpha) * H2 - 2 * cos(Bin) * sin(alpha)) /
(cos(Bin) * 2.0))
check(abs(t0) <= 1, "Cannot compute gamma: sin(gamma)>1")
gamma = asin(t0)
else:
raise RuntimeError(
"determineAlphaAndGammaInFiveCirclesModes() is not "
"appropriate for %s modes" % self._getMode().group)
return (alpha, gamma)
###
def _determineDelta(self, hklPhiNorm, alpha, gamma):
"""
(delta, twotheta) = _determineDelta(hklPhiNorm, alpha, gamma) --
computes delta for all modes. Also returns twotheta for sanity
checking. hklPhiNorm is a 3X1 matrix.
alpha, gamma & delta - in radians.
h k & l normalised to wavevector and in phi axis coordinates
"""
h = hklPhiNorm[0, 0]
k = hklPhiNorm[1, 0]
l = hklPhiNorm[2, 0]
# See Vlieg section 5 (with K=1)
cosdelta = ((1 + sin(gamma) * sin(alpha) - (h * h + k * k + l * l) / 2)
/ (cos(gamma) * cos(alpha)))
costwotheta = (cos(alpha) * cos(gamma) * bound(cosdelta) -
sin(alpha) * sin(gamma))
return (acos(bound(cosdelta)), acos(bound(costwotheta)))
def _determineSampleAnglesInFourAndFiveCircleModes(self, hklPhiNorm, alpha,
delta, gamma, Bin):
"""
(omega, chi, phi, psi)=determineNonZAxisSampleAngles(hklPhiNorm, alpha,
delta, gamma, sigma, tau) where hkl has been normalised by the
wavevector and is in the phi Axis coordinate frame. All angles in
radians. hklPhiNorm is a 3X1 matrix
"""
def equation49through59(psi):
# equation 49 R = (D^-1)*PI*D*Ro
PSI = createVliegsPsiTransformationMatrix(psi)
R = D.I * PSI * D * Ro
# eq 57: extract omega from R
if abs(R[0, 2]) < 1e-20:
omega = -sign(R[1, 2]) * sign(R[0, 2]) * pi / 2
else:
omega = -atan2(R[1, 2], R[0, 2])
# eq 58: extract chi from R
sinchi = sqrt(pow(R[0, 2], 2) + pow(R[1, 2], 2))
sinchi = bound(sinchi)
check(abs(sinchi) <= 1, 'could not compute chi')
# (there are two roots to this equation, but only the first is also
# a solution to R33=cos(chi))
chi = asin(sinchi)
# eq 59: extract phi from R
if abs(R[2, 0]) < 1e-20:
phi = sign(R[2, 1]) * sign(R[2, 1]) * pi / 2
else:
phi = atan2(-R[2, 1], -R[2, 0])
return omega, chi, phi
def checkSolution(omega, chi, phi):
_, _, _, OMEGA, CHI, PHI = createVliegMatrices(
None, None, None, omega, chi, phi)
R = OMEGA * CHI * PHI
RtimesH_phi = R * H_phi
print ("R*H_phi=%s, Q_alpha=%s" %
(R * H_phi.tolist(), Q_alpha.tolist()))
return not differ(RtimesH_phi, Q_alpha, .0001)
# Using Vlieg section 7.2
        # Needed throughout:
[ALPHA, DELTA, GAMMA, _, _, _] = createVliegMatrices(
alpha, delta, gamma, None, None, None)
## Find Ro, one possible solution to equation 46: R*H_phi=Q_alpha
# Normalise hklPhiNorm (As it is currently normalised only to the
# wavevector)
normh = norm(hklPhiNorm)
        check(normh >= 1e-10, "reciprocal lattice vector too close to zero")
H_phi = hklPhiNorm * (1 / normh)
# Create Q_alpha from equation 47, (it comes normalised)
Q_alpha = ((DELTA * GAMMA) - ALPHA.I) * matrix([[0], [1], [0]])
Q_alpha = Q_alpha * (1 / norm(Q_alpha))
if self._getMode().name == '4cPhi':
### Use the fixed value of phi as the final constraint ###
phi = self._getParameter('phi') * TORAD
PHI = calcPHI(phi)
H_chi = PHI * H_phi
omega, chi = _findOmegaAndChiToRotateHchiIntoQalpha(H_chi, Q_alpha)
return (omega, chi, phi, None) # psi = None as not calculated
else:
### Use Bin as the final constraint ###
# Find a solution Ro to Ro*H_phi=Q_alpha
Ro = self._findMatrixToTransformAIntoB(H_phi, Q_alpha)
## equation 50: Find a solution D to D*Q=norm(Q)*[[1],[0],[0]])
D = self._findMatrixToTransformAIntoB(
Q_alpha, matrix([[1], [0], [0]]))
## Find psi and create PSI
# eq 54: compute u=D*Ro*S*[[0],[0],[1]], the surface normal in
# psi frame
[SIGMA, TAU] = createVliegsSurfaceTransformationMatrices(
self._getSigma() * TORAD, self._getTau() * TORAD)
S = TAU * SIGMA
[u1], [u2], [u3] = (D * Ro * S * matrix([[0], [0], [1]])).tolist()
# TODO: If u points along 100, then any psi is a solution. Choose 0
if not differ([u1, u2, u3], [1, 0, 0], 1e-9):
psi = 0
omega, chi, phi = equation49through59(psi)
else:
# equation 53: V=A*(D^-1)
V = ALPHA * D.I
v21 = V[1, 0]
v22 = V[1, 1]
v23 = V[1, 2]
# equation 55
a = v22 * u2 + v23 * u3
b = v22 * u3 - v23 * u2
c = -sin(Bin) - v21 * u1 # TODO: changed sign from paper
# equation 44
# Try first root:
def myatan2(y, x):
if abs(x) < 1e-20 and abs(y) < 1e-20:
return pi / 2
else:
return atan2(y, x)
psi = 2 * myatan2(-(b - sqrt(b * b + a * a - c * c)), -(a + c))
#psi = -acos(c/sqrt(a*a+b*b))+atan2(b,a)# -2*pi
omega, chi, phi = equation49through59(psi)
# if u points along z axis, the psi could have been either 0 or 180
if (not differ([u1, u2, u3], [0, 0, 1], 1e-9) and
abs(psi - pi) < 1e-10):
# Choose 0 to match that read up by angles-to-virtual-angles
psi = 0.
# if u points a long
return (omega, chi, phi, psi)
def _anglesToPsi(self, pos, wavelength):
"""
pos assumed in radians. -180<= psi <= 180
"""
# Using Vlieg section 7.2
        # Needed throughout:
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
pos.alpha, pos.delta, pos.gamma, pos.omega, pos.chi, pos.phi)
# Solve equation 49 for psi, the rotation of the a reference solution
# about Qalpha or H_phi##
# Find Ro, the reference solution to equation 46: R*H_phi=Q_alpha
# Create Q_alpha from equation 47, (it comes normalised)
Q_alpha = ((DELTA * GAMMA) - ALPHA.I) * matrix([[0], [1], [0]])
Q_alpha = Q_alpha * (1 / norm(Q_alpha))
        # Find H_phi
h, k, l = self._anglesToHkl(pos, wavelength)
H_phi = self._getUBMatrix() * matrix([[h], [k], [l]])
normh = norm(H_phi)
        check(normh >= 1e-10, "reciprocal lattice vector too close to zero")
H_phi = H_phi * (1 / normh)
# Find a solution Ro to Ro*H_phi=Q_alpha
        # This is the reference solution with zero azimuth (psi)
Ro = self._findMatrixToTransformAIntoB(H_phi, Q_alpha)
# equation 48:
R = OMEGA * CHI * PHI
## equation 50: Find a solution D to D*Q=norm(Q)*[[1],[0],[0]])
D = self._findMatrixToTransformAIntoB(Q_alpha, matrix([[1], [0], [0]]))
# solve equation 49 for psi
# D*R = PSI*D*Ro
# D*R*(D*Ro)^-1 = PSI
PSI = D * R * ((D * Ro).I)
# Find psi within PSI as defined in equation 51
PSI_23 = PSI[1, 2]
PSI_33 = PSI[2, 2]
psi = atan2(PSI_23, PSI_33)
#print "PSI: ", PSI.tolist()
return psi
def _findMatrixToTransformAIntoB(self, a, b):
"""
Finds a particular matrix Mo that transforms the unit vector a into the
        unit vector b. That is, it finds Mo such that Mo*a = b. a and b are 3x1
        matrices and Mo is a 3x3 matrix.
Throws an exception if this is not possible.
"""
        # Maths from the appendix of "Angle calculations
# for a 5-circle diffractometer used for surface X-ray diffraction",
# E. Vlieg, J.F. van der Veen, J.E. Macdonald and M. Miller, J. of
# Applied Cryst. 20 (1987) 330.
# - courtesy of Elias Vlieg again
# equation A2: compute angle xi between vectors a and b
cosxi = dot3(a, b)
try:
cosxi = bound(cosxi)
except ValueError:
raise Exception("Could not compute cos(xi), vectors a=%f and b=%f "
"must be of unit length" % (norm(a), norm(b)))
xi = acos(cosxi)
# Mo is identity matrix if xi zero (math below would blow up)
if abs(xi) < 1e-10:
return I
# equation A3: c=cross(a,b)/sin(xi)
c = cross3(a, b) * (1 / sin(xi))
# equation A4: find D matrix that transforms a into the frame
# x = a; y = c x a; z = c. */
a1 = a[0, 0]
a2 = a[1, 0]
a3 = a[2, 0]
c1 = c[0, 0]
c2 = c[1, 0]
c3 = c[2, 0]
D = matrix([[a1, a2, a3],
[c2 * a3 - c3 * a2, c3 * a1 - c1 * a3, c1 * a2 - c2 * a1],
[c1, c2, c3]])
# equation A5: create Xi to rotate by xi about z-axis
XI = matrix([[cos(xi), -sin(xi), 0],
[sin(xi), cos(xi), 0],
[0, 0, 1]])
# eq A6: compute Mo
return D.I * XI * D
def _findOmegaAndChiToRotateHchiIntoQalpha(h_chi, q_alpha):
"""
(omega, chi) = _findOmegaAndChiToRotateHchiIntoQalpha(H_chi, Q_alpha)
Solves for omega and chi in OMEGA*CHI*h_chi = q_alpha where h_chi and
q_alpha are 3x1 matrices with unit length. Omega and chi are returned in
radians.
Throws an exception if this is not possible.
"""
def solve(a, b, c):
"""
x1,x2 = solve(a , b, c)
solves for the two solutions to x in equations of the form
a*sin(x) + b*cos(x) = c
by using the trigonometric identity
        a*sin(x) + b*cos(x) = sqrt(a**2 + b**2) * sin(x + p)
where
p = atan(b/a) + {0 if a>=0
{pi if a<0
"""
if a == 0:
p = pi / 2 if b >= 0 else - pi / 2
else:
p = atan(b / a)
if a < 0:
p = p + pi
guts = c / sqrt(a ** 2 + b ** 2)
if guts < -1:
guts = -1
elif guts > 1:
guts = 1
left1 = asin(guts)
left2 = pi - left1
return (left1 - p, left2 - p)
def ne(a, b):
"""
shifts a and b in between -pi and pi and tests for near equality
"""
def shift(a):
if a > pi:
return a - 2 * pi
elif a <= -pi:
return a + 2 * pi
else:
return a
return abs(shift(a) - shift(b)) < .0000001
# 1. Compute some solutions
h_chi1 = h_chi[0, 0]
h_chi2 = h_chi[1, 0]
h_chi3 = h_chi[2, 0]
q_alpha1 = q_alpha[0, 0]
q_alpha2 = q_alpha[1, 0]
q_alpha3 = q_alpha[2, 0]
try:
# a) Solve for chi using Equation 3
chi1, chi2 = solve(-h_chi1, h_chi3, q_alpha3)
# b) Solve for omega Equation 1 and each chi
B = h_chi1 * cos(chi1) + h_chi3 * sin(chi1)
eq1omega11, eq1omega12 = solve(h_chi2, B, q_alpha1)
B = h_chi1 * cos(chi2) + h_chi3 * sin(chi2)
eq1omega21, eq1omega22 = solve(h_chi2, B, q_alpha1)
# c) Solve for omega Equation 2 and each chi
A = -h_chi1 * cos(chi1) - h_chi3 * sin(chi1)
eq2omega11, eq2omega12 = solve(A, h_chi2, q_alpha2)
A = -h_chi1 * cos(chi2) - h_chi3 * sin(chi2)
eq2omega21, eq2omega22 = solve(A, h_chi2, q_alpha2)
except ValueError, e:
raise ValueError(
str(e) + ":\nProblem in fixed-phi calculation for:\nh_chi: " +
str(h_chi.tolist()) + " q_alpha: " + str(q_alpha.tolist()))
# 2. Choose values of chi and omega that are solutions to equations 1 and 2
solutions = []
# a) Check the chi1 solutions
print "_findOmegaAndChiToRotateHchiIntoQalpha:"
if ne(eq1omega11, eq2omega11) or ne(eq1omega11, eq2omega12):
# print "1: eq1omega11, chi1 = ", eq1omega11, chi1
solutions.append((eq1omega11, chi1))
if ne(eq1omega12, eq2omega11) or ne(eq1omega12, eq2omega12):
# print "2: eq1omega12, chi1 = ", eq1omega12, chi1
solutions.append((eq1omega12, chi1))
# b) Check the chi2 solutions
if ne(eq1omega21, eq2omega21) or ne(eq1omega21, eq2omega22):
# print "3: eq1omega21, chi2 = ", eq1omega21, chi2
solutions.append((eq1omega21, chi2))
if ne(eq1omega22, eq2omega21) or ne(eq1omega22, eq2omega22):
# print "4: eq1omega22, chi2 = ", eq1omega22, chi2
solutions.append((eq1omega22, chi2))
# print solutions
# print "*"
if len(solutions) == 0:
e = "h_chi: " + str(h_chi.tolist())
e += " q_alpha: " + str(q_alpha.tolist())
e += ("\nchi1:%4f eq1omega11:%4f eq1omega12:%4f eq2omega11:%4f "
"eq2omega12:%4f" % (chi1 * TODEG, eq1omega11 * TODEG,
eq1omega12 * TODEG, eq2omega11 * TODEG, eq2omega12 * TODEG))
e += ("\nchi2:%4f eq1omega21:%4f eq1omega22:%4f eq2omega21:%4f "
"eq2omega22:%4f" % (chi2 * TODEG, eq1omega21 * TODEG,
eq1omega22 * TODEG, eq2omega21 * TODEG, eq2omega22 * TODEG))
raise Exception("Could not find simultaneous solution for this fixed "
"phi mode problem\n" + e)
if not PREFER_POSITIVE_CHI_SOLUTIONS:
return solutions[0]
positive_chi_solutions = [sol for sol in solutions if sol[1] > 0]
if len(positive_chi_solutions) == 0:
print "WARNING: A +ve chi solution was requested, but none were found."
print " Returning a -ve one. Try the mapper"
return solutions[0]
if len(positive_chi_solutions) > 1:
print ("INFO: Multiple +ve chi solutions were found [(omega, chi) ...]"
" = " + str(positive_chi_solutions))
print " Returning the first"
return positive_chi_solutions[0]
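# --- Illustrative usage (editor's sketch, not part of the original module).
# The unit vectors below are hypothetical 3x1 matrices; the returned omega and
# chi are in radians and satisfy OMEGA*CHI*h_chi = q_alpha.
def _example_fixed_phi_solution():
    h_chi = matrix([[0.0], [0.0], [1.0]])
    q_alpha = matrix([[0.0], [1.0], [0.0]])
    return _findOmegaAndChiToRotateHchiIntoQalpha(h_chi, q_alpha)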
| gpl-3.0 | -7,248,182,833,805,356,000 | 37.557261 | 79 | 0.552208 | false |
vollov/py-parser | src/xmlp/coverage.py | 1 | 2568 | #!/usr/bin/python
from xml.dom.minidom import parse
import xml.dom.minidom, os
current_directory = os.path.dirname(os.path.abspath(__file__))
data_directory = os.path.join(current_directory, '../data')
file_path = os.path.join(data_directory, 'coverages.xml')
# Open XML document using minidom parser
DOMTree = xml.dom.minidom.parse(file_path)
collection = DOMTree.documentElement
items = collection.getElementsByTagName("GIOSXMLEntry")
for item in items:
type_code = item.getElementsByTagName('GIOSXMLCd')[0]
occ_ind = item.getElementsByTagName('OccasionalDriverInd')
if occ_ind:
description = item.getElementsByTagName('EnglishDesc')[0]
print "TypeCode: {0} OCC: {1} desc: {2}".format(type_code.childNodes[0].data, occ_ind[0].childNodes[0].data, description.childNodes[0].data)
#
# <collection shelf="New Arrivals">
# <movie title="Enemy Behind">
# <type>War, Thriller</type>
# <format>DVD</format>
# <year>2003</year>
# <rating>PG</rating>
# <stars>10</stars>
# <description>Talk about a US-Japan war</description>
# </movie>
# <movie title="Transformers">
# <type>Anime, Science Fiction</type>
# <format>DVD</format>
# <year>1989</year>
# <rating>R</rating>
# <stars>8</stars>
# <description>A schientific fiction</description>
# </movie>
# <movie title="Trigun">
# <type>Anime, Action</type>
# <format>DVD</format>
# <episodes>4</episodes>
# <rating>PG</rating>
# <stars>10</stars>
# <description>Vash the Stampede!</description>
# </movie>
# <movie title="Ishtar">
# <type>Comedy</type>
# <format>VHS</format>
# <rating>PG</rating>
# <stars>2</stars>
# <description>Viewable boredom</description>
# </movie>
# </collection>
# if collection.hasAttribute("shelf"):
# print "Root element : %s" % collection.getAttribute("shelf")
# Get all the movies in the collection
# movies = collection.getElementsByTagName("movie")
# Print detail of each movie.
# for movie in movies:
# print "*****Movie*****"
# if movie.hasAttribute("title"):
# print "Title: %s" % movie.getAttribute("title")
#
# type = movie.getElementsByTagName('type')[0]
# print "Type: %s" % type.childNodes[0].data
# format = movie.getElementsByTagName('format')[0]
# print "Format: %s" % format.childNodes[0].data
# rating = movie.getElementsByTagName('rating')[0]
# print "Rating: %s" % rating.childNodes[0].data
# description = movie.getElementsByTagName('description')[0]
# print "Description: %s" % description.childNodes[0].data | mit | 2,440,359,789,388,073,000 | 30.716049 | 151 | 0.666667 | false |
endrjuskr/studies | MRJP/LatteCompilerPython/src/lattepar.py | 1 | 10557 | __author__ = 'Andrzej Skrodzki - as292510'
from .LatteParsers.LatteTypes import *
from .LatteParsers.LatteExpressions import *
from .LatteParsers.LatteParameters import *
from .LatteParsers.LatteStatements import *
from .LatteParsers.LatteTopDefinitions import *
from .LatteExceptions import *
import ply.yacc as yacc
from tokrules import tokens
exception_list = []
precedence = (
('nonassoc', 'GE', 'GT', 'LE', 'LT', 'NE'),
('right', 'AND', 'OR'),
('nonassoc', 'EQ'),
('left', 'PLUS', 'MINUS'),
('left', 'PLUSPLUS', 'MINUSMINUS'),
('right', 'UNOT', 'UMINUS'),
)
# Program definition
def p_program(p):
'program : listtopdef'
p[0] = Program(p[1])
# List definitions
def p_list_expr(p):
'''listexpr :
| expr
| listexpr COMMA expr'''
if len(p) == 1:
# empty list
p[0] = []
elif len(p) == 2:
# last expression
p[0] = [p[1]]
else:
# list of expressions
p[0] = p[1]
p[0].append(p[3])
def p_list_topdef(p):
'''listtopdef :
| topdef
| listtopdef topdef'''
if len(p) == 1:
# empty list
p[0] = []
elif len(p) == 2:
# last function definition
p[0] = [p[1]]
else:
# list of function definitions
p[0] = p[1]
p[0].append(p[2])
def p_list_stmt(p):
'''liststmt : stmt
| liststmt stmt'''
if len(p) == 2:
# last statement
p[0] = [p[1]]
else:
# list of statements
p[0] = p[1]
p[0].append(p[2])
def p_list_fields(p):
'''listfields : field
| listfields field'''
if len(p) == 2:
# last statement
p[0] = [p[1]]
else:
# list of statements
p[0] = p[1]
p[0].append(p[2])
def p_list_item(p):
'''listitem : item
| listitem COMMA item'''
if len(p) == 1:
# empty list
p[0] = []
elif len(p) == 2:
# last item
p[0] = [p[1]]
else:
# list of items
p[0] = p[1]
p[0].append(p[3])
def p_list_arg(p):
'''listarg :
| arg
| listarg COMMA arg'''
if len(p) == 1:
# empty list
p[0] = []
elif len(p) == 2:
# last argument
p[0] = [p[1]]
else:
#list of arguments
p[0] = p[1]
p[0].append(p[3])
# Item productions
def p_item_noinit(p):
'item : ID'
p[0] = NoInitItem(p[1], p.lineno(1), p.lexpos(1))
def p_item_init(p):
'item : ID EQUALS expr'
p[0] = InitItem(p[1], p[3], p.lineno(1), p.lexpos(1))
# Argument definition
def p_arg(p):
'arg : type ID'
p[0] = Arg(p[1], p[2], p.lineno(2), p.lexpos(2))
def p_arg_o(p):
'arg : ID ID'
p[0] = Arg(Type(p[1]), p[2], p.lineno(2), p.lexpos(2))
def p_arg_oa(p):
'arg : ID LARRAY RARRAY ID'
p[0] = Arg(ArrayType(Type(p[1])), p[4], p.lineno(2), p.lexpos(2))
def p_field_s(p):
'field : type ID SEMI'
p[0] = Field(p[1], p[2], p.lineno(2), p.lexpos(2))
def p_field_o(p):
'field : ID ID SEMI'
p[0] = Field(Type(p[1]), p[2], p.lineno(2), p.lexpos(2))
def p_field_oa(p):
'field : ID LARRAY RARRAY ID SEMI'
p[0] = Field(ArrayType(Type(p[1])), p[4], p.lineno(2), p.lexpos(2))
def p_methoddef(p):
'field : type ID LPAREN listarg RPAREN block'
p[0] = FnDef(p[1], p[2], p[4], p[6], p.lineno(2))
def p_methoddef_o(p):
'field : ID ID LPAREN listarg RPAREN block'
p[0] = FnDef(Type(p[1]), p[2], p[4], p[6], p.lineno(2))
def p_methoddef_oa(p):
'field : ID LARRAY RARRAY ID LPAREN listarg RPAREN block'
p[0] = FnDef(ArrayType(Type(p[1])), p[4], p[6], p[8], p.lineno(2))
# Function definition
def p_class_extends(p):
'''ext :
| EXTENDS ID'''
if len(p) == 1:
p[0] = []
elif len(p) == 3:
p[0] = [p[2]]
def p_classdef(p):
'topdef : CLASS ID ext LBRACE listfields RBRACE'
p[0] = ClassDef(p[2], p[3], p[5], p.lineno(2))
def p_fndef(p):
'topdef : type ID LPAREN listarg RPAREN block'
p[0] = FnDef(p[1], p[2], p[4], p[6], p.lineno(2))
def p_fndef_o(p):
'topdef : ID ID LPAREN listarg RPAREN block'
p[0] = FnDef(Type(p[1]), p[2], p[4], p[6], p.lineno(2))
def p_fndef_oa(p):
'topdef : ID LARRAY RARRAY ID LPAREN listarg RPAREN block'
p[0] = FnDef(ArrayType(Type(p[1])), p[4], p[6], p[8], p.lineno(2))
def p_block(p):
'''block : LBRACE RBRACE
| LBRACE liststmt RBRACE'''
if len(p) == 3:
p[0] = Block([])
else:
p[0] = Block(p[2])
# Statement definitions
def p_statement_empty(p):
'stmt : SEMI'
p[0] = EmptyStmt(p.lineno(1), p.lexpos(1))
def p_statement_block(p):
'stmt : block'
p[0] = BStmt(p[1], p.lineno(1))
def p_statement_decl(p):
'stmt : type listitem SEMI'
p[0] = DeclStmt(p[1], p[2], p.lineno(3), p.lexpos(3))
def p_statement_decl_0(p):
'stmt : ID listitem SEMI'
p[0] = DeclStmt(Type(p[1]), p[2], p.lineno(1), p.lexpos(1))
def p_statement_decl_1(p):
'stmt : ID LARRAY RARRAY listitem SEMI'
p[0] = DeclStmt(ArrayType(Type(p[1])), p[4], p.lineno(1), p.lexpos(1))
def p_statement_var_ass(p):
'''stmt : expr6 EQUALS expr SEMI '''
p[0] = VarAssStmt(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_statement_incr(p):
'stmt : expr6 PLUSPLUS SEMI'
p[0] = IncrStmt(p[1], p.lineno(2), p.lexpos(2))
def p_statement_decr(p):
'stmt : expr6 MINUSMINUS SEMI'
p[0] = DecrStmt(p[1], p.lineno(2), p.lexpos(2))
def p_statement_ret(p):
'stmt : RETURN expr SEMI'
p[0] = RetStmt(p[2], p.lineno(1), p.lexpos(1))
def p_statement_vret(p):
'stmt : RETURN SEMI'
p[0] = VRetStmt(p.lineno(1), p.lexpos(1))
def p_statement_cond(p):
'stmt : IF LPAREN expr RPAREN stmt'
p[0] = CondStmt(p[3], p[5], p.lineno(1), p.lexpos(1))
def p_statement_condelse(p):
'stmt : IF LPAREN expr RPAREN stmt ELSE stmt'
p[0] = CondElseStmt(p[3], p[5], p[7], p.lineno(1), p.lexpos(1))
def p_statement_while(p):
'stmt : WHILE LPAREN expr RPAREN stmt'
p[0] = WhileStmt(p[3], p[5], p.lineno(1), p.lexpos(1))
def p_statement_sexp(p):
'stmt : expr SEMI'
p[0] = SExpStmt(p[1], p.lineno(2), p.lexpos(2))
def p_statement_for(p):
'stmt : FOR LPAREN type_s ID COL expr RPAREN stmt'
p[0] = ForStmt(p[4], p[3], p[6], p[8], p.lineno(1), p.lexpos(1))
def p_statement_for_2(p):
'stmt : FOR LPAREN ID ID COL expr RPAREN stmt'
p[0] = ForStmt(p[4], Type(p[3]), p[6], p[8], p.lineno(1), p.lexpos(1))
# Expression definitions
def p_expression_array_init(p):
'expr6 : NEW type_s LARRAY expr RARRAY'
p[0] = EArrayInit(p[2], p[4], p.lineno(1), p.lexpos(1))
def p_expression_array_init_2(p):
'expr6 : NEW ID LARRAY expr RARRAY'
p[0] = EArrayInit(Type(p[2]), p[4], p.lineno(1), p.lexpos(1))
def p_expression_object_init(p):
'expr6 : NEW ID'
p[0] = EObjectInit(Type(p[2]), p.lineno(1), p.lexpos(1))
def p_expression_var(p):
'expr6 : ID'
p[0] = EVar(p[1], p.lineno(1), p.lexpos(1))
def p_expression_field(p):
'expr6 : expr6 DOT ID'
p[0] = EObjectField(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_field_a(p):
'expr6 : expr6 DOT ID LARRAY expr RARRAY'
p[0] = EObjectFieldApp(p[1], p[3], p[5], p.lineno(2), p.lexpos(2))
def p_expression_array(p):
'expr6 : ID LARRAY expr RARRAY'
p[0] = EArrayApp(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_int(p):
'expr6 : NUMBER'
p[0] = ELitInt(p[1], p.lineno(1), p.lexpos(1))
def p_expression_null(p):
'''expr6 : LPAREN ID RPAREN NULL '''
p[0] = ELitNull(p[2], p.lineno(1), p.lexpos(1))
def p_expression_boolean(p):
'''expr6 : TRUE
| FALSE'''
p[0] = ELitBoolean(p[1], p.lineno(1), p.lexpos(1))
def p_expression_app(p):
'expr6 : ID LPAREN listexpr RPAREN'
p[0] = EApp(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_method_app(p):
'expr6 : expr6 DOT ID LPAREN listexpr RPAREN'
p[0] = EMethodApp(p[1], p[3], p[5], p.lineno(2), p.lexpos(2))
def p_expression_group(p):
'expr6 : LPAREN expr RPAREN'
p[0] = p[2]
def p_expression_string(p):
'expr6 : SENTENCE'
p[0] = EString(p[1], p.lineno(1), p.lexpos(1))
def p_expression_neg(p):
'expr5 : MINUS expr6 %prec UMINUS'
p[0] = ENeg(p[2], p.lineno(1), p.lexpos(1))
def p_expression_not_1(p):
'''expr5 : expr6'''
p[0] = p[1]
def p_expression_not_2(p):
'''expr5 : NOT expr6 %prec UNOT'''
p[0] = ENot(p[2], p.lineno(1), p.lexpos(1))
def p_expression_mul_1(p):
'''expr4 : expr5'''
p[0] = p[1]
def p_mulop(p):
'''mulop : TIMES
| DIVIDE
| MOD'''
p[0] = p[1]
def p_expression_mul_2(p):
'''expr4 : expr4 mulop expr5'''
p[0] = EMul(p[1], p[3], p[2], p[1].no_line, p[1].pos + 1)
def p_addop(p):
'''addop : PLUS
| MINUS'''
p[0] = p[1]
def p_expression_add_1(p):
'''expr3 : expr3 addop expr4'''
p[0] = EAdd(p[1], p[3], p[2], p[1].no_line, p[1].pos + 1)
def p_expression_add_3(p):
'''expr3 : expr4'''
p[0] = p[1]
def p_relop(p):
'''relop : LT
| LE
| GT
| GE
| EQ
| NE'''
p[0] = p[1]
def p_expression_rel_1(p):
'''expr2 : expr2 relop expr3'''
p[0] = ERel(p[1], p[3], p[2], p[1].no_line, p[1].pos + 1)
def p_expression_rel_2(p):
'''expr2 : expr3'''
p[0] = p[1]
def p_expression_and_1(p):
'''expr1 : expr2 AND expr1'''
p[0] = EAnd(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_and_2(p):
'''expr1 : expr2'''
p[0] = p[1]
def p_expression_or_1(p):
'''expr : expr1 OR expr'''
p[0] = EOr(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_or_2(p):
'''expr : expr1'''
p[0] = p[1]
# Type definition
def p_type_s(p):
'''type_s : INT
| STRING
| VOID
| BOOLEAN '''
p[0] = Type(p[1])
def p_type_1(p):
'''type : type_s'''
p[0] = p[1]
def p_type_a(p):
'''type : type_s LARRAY RARRAY'''
p[0] = ArrayType(p[1])
# Error definition
def p_error(p):
if p is None:
return
exception_list.append(SyntaxException("Wrong expression '" + str(p.value) + "'.", p.lineno, pos=p.lexpos))
tok = None
while 1:
tok = yacc.token()
if not tok:
break
if tok.type == 'SEMI':
tok = yacc.token()
yacc.errok()
return tok
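# --- Illustrative usage (editor's sketch, not part of the original module).
# Uses get_parser() defined below; the lexer argument is assumed to be a PLY
# lexer built from tokrules, and the source string is hypothetical.
def _example_parse(source, lexer):
    parser = get_parser()
    return parser.parse(source, lexer=lexer)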
def get_parser():
return yacc.yacc(write_tables=0, debug=0, outputdir="src") | apache-2.0 | -9,036,227,321,942,147,000 | 19.906931 | 110 | 0.531401 | false |
GearPlug/batchbook-python | tests/test_client.py | 1 | 2241 | import os
import time
from unittest import TestCase
from batchbook.client import Client
class BatchbookTestCases(TestCase):
def setUp(self):
self.api_key = os.environ.get('apikey')
self.account_name = os.environ.get('account_name')
self.my_contact = {
"person":
{
"prefix":"Nuevo",
"first_name":"Nuevo",
"middle_name":"Nuevo",
"last_name":"Nuevo",
"emails":[
{
"address":"[email protected]",
"label":"work",
"primary": True
}],
}
}
self.client = Client(api_key=self.api_key, account_name=self.account_name)
def test_name_url(self):
_url = "https://{0}.batchbook.com/api/v1".format(self.account_name)
result = self.client._get_name_url(_url)
self.assertEqual(self.account_name, result)
def test_get_contacts(self):
result_create = self.client.create_contact(data=self.my_contact)
time.sleep(10)
result = self.client.get_contacts()
_id = ""
for r in result:
if (r['id'] == result_create['id']):
_id = r['id']
self.assertEqual(_id, result_create['id'])
self.client.delete_contact(contact_id=result_create['id'])
def test_get_contact(self):
result_create = self.client.create_contact(data=self.my_contact)
result = self.client.get_contact(result_create['id'])
self.assertEqual(result['id'], result_create['id'])
self.client.delete_contact(contact_id=result_create['id'])
def test_delete_contact(self):
result_create = self.client.create_contact(data=self.my_contact)
self.client.delete_contact(contact_id=result_create['id'])
try:
self.client.get_contact(result_create['id'])
result = False
except:
result = True
self.assertTrue(result)
| mit | -2,660,502,318,319,371,300 | 35.737705 | 82 | 0.499331 | false |
ninjawil/weather-station | scripts/setup.py | 1 | 4064 | #-------------------------------------------------------------------------------
#
# The MIT License (MIT)
#
# Copyright (c) 2015 William De Freitas
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#-------------------------------------------------------------------------------
#!/usr/bin/env python
'''Sets up the environment to run the weather station.
Begins by checking that an RRD file exists and that the data sources are
correct. If no RRD file is found, a new one is created.
Initiates scripts via cron jobs.'''
#===============================================================================
# Import modules
#===============================================================================
# Standard Library
import os
import sys
import time
# Third party modules
# Application modules
import log
import settings as s
import rrd_tools
#===============================================================================
# MAIN
#===============================================================================
def main():
'''Entry point for script'''
script_name = os.path.basename(sys.argv[0])
#---------------------------------------------------------------------------
# Set up logger
#---------------------------------------------------------------------------
logger = log.setup('root', '{folder}/logs/{script}.log'.format(
folder= s.SYS_FOLDER,
script= script_name[:-3]))
logger.info('')
logger.info('--- Script {script} Started ---'.format(script= script_name))
#---------------------------------------------------------------------------
# SET UP RRD DATA AND TOOL
#---------------------------------------------------------------------------
    rrd_file = '{fd1}{fd2}{fl}'.format(fd1= s.SYS_FOLDER,
                                       fd2= s.DATA_FOLDER,
                                       fl= s.RRDTOOL_RRD_FILE)
    rrd = rrd_tools.RrdFile(rrd_file)
    if not os.path.exists(rrd_file):
rrd.create_file(s.SENSOR_SET,
s.RRDTOOL_RRA,
s.UPDATE_RATE,
s.RRDTOOL_HEARTBEAT,
int(time.time() + s.UPDATE_RATE))
logger.info('RRD file not found. New file created')
elif sorted(rrd.ds_list()) != sorted(list(s.SENSOR_SET.keys())):
logger.error('Data sources in RRD file does not match set up.')
sys.exit()
else:
logger.info('RRD file found and checked OK')
#---------------------------------------------------------------------------
# SCRIPTS
#---------------------------------------------------------------------------
#Set up CRONTAB
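# --- Editor's sketch (not part of the original script): one possible way to
# register a logger script with cron, assuming the third-party python-crontab
# package is available; the script path and schedule are hypothetical and this
# helper is not called anywhere.
def _setup_crontab_sketch():
    from crontab import CronTab
    cron = CronTab(user=True)
    job = cron.new(command='python {fd}/scripts/logger.py'.format(fd=s.SYS_FOLDER),
                   comment='weather station logger')
    job.setall('*/10 * * * *')
    cron.write()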
#===============================================================================
# BOILER PLATE
#===============================================================================
if __name__=='__main__':
sys.exit(main())
| mit | -6,399,027,101,030,141,000 | 36.62963 | 80 | 0.450295 | false |
ameya30/IMaX_pole_data_scripts | imax_lp_max_/imax_rotate_Q_U.py | 1 | 3065 | import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.optimize import minimize_scalar
from astropy.io import fits
def Q_eq(phi):
    # Accumulate the squared rotated U over the five wavelength points
sumU = 0
for wv in range(0, 5):
uSing = uMes[wv]
qSing = qMes[wv]
uNew = -1 * qSing * np.sin(phi) + uSing * np.cos(phi)
sumU += np.square(uNew)
return sumU
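# --- Editor's note (illustrative, not part of the original script): Q_eq sums
# the squared rotated U, U' = -Q*sin(phi) + U*cos(phi), over the wavelength
# points; minimising it finds the rotation that puts the signal into Q. The
# helper below applies the same rotation to hypothetical Q/U arrays.
def _example_rotate_q_u(q, u, phi):
    q_new = q * np.cos(phi) + u * np.sin(phi)
    u_new = -q * np.sin(phi) + u * np.cos(phi)
    return q_new, u_new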
plot_figs = 0
# Load in array
input_list = glob.glob('../Data/mean_rem_output_*.fits')
for i in range (0, len(input_list)):
fullArr = fits.open(input_list[i])
fullArr = fullArr[0].data
save_name = '../Data/imax_lp_max_' + input_list[i].split('_')[3]
save_angs = '../Data/imax_roat_angle_Q_U_' + input_list[i].split('_')[3]
print save_name
print save_angs
fullDim = np.shape(fullArr)
angArr = np.empty(shape = (fullDim[2], fullDim[3]))
uNew = np.empty(shape = (fullDim[1], fullDim[2], fullDim[3]))
qNew = np.empty(shape = (fullDim[1], fullDim[2], fullDim[3]))
for x in range (0, fullDim[3]):
for y in range (0, fullDim[2]):
qMes = fullArr[1, :, y, x]
uMes = fullArr[2, :, y, x]
res = minimize_scalar(Q_eq, bounds=(0, np.pi), method='bounded')
angle = res['x']
angArr[y, x] = angle
for wv in range (0, 5):
uNew[wv, y, x] = -1 * fullArr[1, wv, y, x] * np.sin(angle) + fullArr[2, wv, y, x] * np.cos(angle)
qNew[wv, y, x] = fullArr[1, wv, y, x] * np.cos(angle) + fullArr[2, wv, y, x] * np.sin(angle)
hdu_ang = fits.PrimaryHDU(angArr)
hdu_max = fits.PrimaryHDU(qNew)
hdu_ang.writeto(save_angs)
hdu_max.writeto(save_name)
if plot_figs == 1:
fig, axes = plt.subplots(ncols = 3,
nrows = 1,
figsize = (12, 18))
fig.subplots_adjust(left = 0.04,
right = 0.97,
top = 0.99,
bottom = 0.05,
wspace = 0.08,
hspace = 0.15)
imAng = axes[0].imshow(angArr[:, :], cmap = 'gray', clim = (-0.05,0.05))
minpl = axes[1].imshow(qNew[1, :, :], cmap = 'gray', clim = (-0.05,0.05))
maxpl = axes[2].imshow(uNew[1, :, :], cmap = 'gray', clim = (-0.05,0.05))
for ax in axes:
ax.invert_yaxis()
plt.savefig('../Figures/imax_angle_arr.png', dpi = 500)
fig, axes = plt.subplots(ncols = 2,
nrows = 2,
figsize = (12, 6))
axes[1,1].plot(uNew[:, 102, 636])
axes[0,0].plot(fullArr[1,:, 102, 636])
axes[0,1].plot(fullArr[2,:, 102, 636])
axes[1,0].plot(qNew[:, 102, 636])
axes[0,0].set_title('Mesured Q')
axes[0,1].set_title('Mesured U')
axes[1,0].set_title('new Q' )
axes[1,1].set_title('new U')
plt.savefig('../Figures/imax_roation_line_change.png', dpi = 500)
| mit | -1,030,299,242,345,325,800 | 29.346535 | 114 | 0.501142 | false |